diff options
730 files changed, 21092 insertions, 21121 deletions
diff --git a/bolt/include/bolt/Rewrite/RewriteInstance.h b/bolt/include/bolt/Rewrite/RewriteInstance.h index 19dcce8..0fe2e32 100644 --- a/bolt/include/bolt/Rewrite/RewriteInstance.h +++ b/bolt/include/bolt/Rewrite/RewriteInstance.h @@ -249,12 +249,11 @@ private: /// Analyze relocation \p Rel. /// Return true if the relocation was successfully processed, false otherwise. /// The \p SymbolName, \p SymbolAddress, \p Addend and \p ExtractedValue - /// parameters will be set on success. The \p Skip argument indicates - /// that the relocation was analyzed, but it must not be processed. + /// parameters will be set on success. bool analyzeRelocation(const object::RelocationRef &Rel, uint32_t &RType, std::string &SymbolName, bool &IsSectionRelocation, uint64_t &SymbolAddress, int64_t &Addend, - uint64_t &ExtractedValue, bool &Skip) const; + uint64_t &ExtractedValue) const; /// Rewrite non-allocatable sections with modifications. void rewriteNoteSections(); diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp index 98440cd..b7ded6b 100644 --- a/bolt/lib/Core/BinaryContext.cpp +++ b/bolt/lib/Core/BinaryContext.cpp @@ -1662,7 +1662,7 @@ void BinaryContext::preprocessDWODebugInfo() { "files.\n"; } // Prevent failures when DWOName is already an absolute path. 
- sys::fs::make_absolute(DWOCompDir, AbsolutePath); + sys::path::make_absolute(DWOCompDir, AbsolutePath); DWARFUnit *DWOCU = DwarfUnit->getNonSkeletonUnitDIE(false, AbsolutePath).getDwarfUnit(); if (!DWOCU->isDWOUnit()) { diff --git a/bolt/lib/Core/Relocation.cpp b/bolt/lib/Core/Relocation.cpp index f882627..4b827b6 100644 --- a/bolt/lib/Core/Relocation.cpp +++ b/bolt/lib/Core/Relocation.cpp @@ -81,7 +81,6 @@ static bool isSupportedAArch64(uint32_t Type) { case ELF::R_AARCH64_LD64_GOT_LO12_NC: case ELF::R_AARCH64_TLSDESC_LD64_LO12: case ELF::R_AARCH64_TLSDESC_ADD_LO12: - case ELF::R_AARCH64_TLSDESC_CALL: case ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: case ELF::R_AARCH64_PREL16: case ELF::R_AARCH64_PREL32: @@ -193,7 +192,6 @@ static size_t getSizeForTypeAArch64(uint32_t Type) { case ELF::R_AARCH64_LD64_GOT_LO12_NC: case ELF::R_AARCH64_TLSDESC_LD64_LO12: case ELF::R_AARCH64_TLSDESC_ADD_LO12: - case ELF::R_AARCH64_TLSDESC_CALL: case ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: case ELF::R_AARCH64_PREL32: case ELF::R_AARCH64_MOVW_UABS_G0: @@ -248,7 +246,14 @@ static bool skipRelocationTypeX86(uint32_t Type) { } static bool skipRelocationTypeAArch64(uint32_t Type) { - return Type == ELF::R_AARCH64_NONE || Type == ELF::R_AARCH64_LD_PREL_LO19; + switch (Type) { + default: + return false; + case ELF::R_AARCH64_NONE: + case ELF::R_AARCH64_LD_PREL_LO19: + case ELF::R_AARCH64_TLSDESC_CALL: + return true; + } } static bool skipRelocationTypeRISCV(uint32_t Type) { @@ -362,7 +367,6 @@ static uint64_t extractValueAArch64(uint32_t Type, uint64_t Contents, return static_cast<int64_t>(PC) + SignExtend64<32>(Contents & 0xffffffff); case ELF::R_AARCH64_PREL64: return static_cast<int64_t>(PC) + Contents; - case ELF::R_AARCH64_TLSDESC_CALL: case ELF::R_AARCH64_JUMP26: case ELF::R_AARCH64_CALL26: // Immediate goes in bits 25:0 of B and BL. 
@@ -552,7 +556,6 @@ static bool isGOTAArch64(uint32_t Type) { case ELF::R_AARCH64_TLSDESC_ADR_PAGE21: case ELF::R_AARCH64_TLSDESC_LD64_LO12: case ELF::R_AARCH64_TLSDESC_ADD_LO12: - case ELF::R_AARCH64_TLSDESC_CALL: return true; } } @@ -591,7 +594,6 @@ static bool isTLSAArch64(uint32_t Type) { case ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: case ELF::R_AARCH64_TLSDESC_LD64_LO12: case ELF::R_AARCH64_TLSDESC_ADD_LO12: - case ELF::R_AARCH64_TLSDESC_CALL: case ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: return true; } @@ -667,7 +669,6 @@ static bool isPCRelativeAArch64(uint32_t Type) { case ELF::R_AARCH64_MOVW_UABS_G2_NC: case ELF::R_AARCH64_MOVW_UABS_G3: return false; - case ELF::R_AARCH64_TLSDESC_CALL: case ELF::R_AARCH64_CALL26: case ELF::R_AARCH64_JUMP26: case ELF::R_AARCH64_TSTBR14: diff --git a/bolt/lib/Rewrite/DWARFRewriter.cpp b/bolt/lib/Rewrite/DWARFRewriter.cpp index 5c89a42..7366d2a 100644 --- a/bolt/lib/Rewrite/DWARFRewriter.cpp +++ b/bolt/lib/Rewrite/DWARFRewriter.cpp @@ -1853,7 +1853,7 @@ void DWARFRewriter::writeDWOFiles( else if (!sys::fs::exists(CompDir)) CompDir = "."; // Prevent failures when DWOName is already an absolute path. 
- sys::fs::make_absolute(CompDir, AbsolutePath); + sys::path::make_absolute(CompDir, AbsolutePath); std::error_code EC; std::unique_ptr<ToolOutputFile> TempOut = diff --git a/bolt/lib/Rewrite/RewriteInstance.cpp b/bolt/lib/Rewrite/RewriteInstance.cpp index 8b78c53..c13a9f0 100644 --- a/bolt/lib/Rewrite/RewriteInstance.cpp +++ b/bolt/lib/Rewrite/RewriteInstance.cpp @@ -2274,8 +2274,7 @@ uint32_t getRelocationSymbol(const ELFObjectFileBase *Obj, bool RewriteInstance::analyzeRelocation( const RelocationRef &Rel, uint32_t &RType, std::string &SymbolName, bool &IsSectionRelocation, uint64_t &SymbolAddress, int64_t &Addend, - uint64_t &ExtractedValue, bool &Skip) const { - Skip = false; + uint64_t &ExtractedValue) const { if (!Relocation::isSupported(RType)) return false; @@ -2707,9 +2706,8 @@ void RewriteInstance::handleRelocation(const SectionRef &RelocatedSection, int64_t Addend; uint64_t ExtractedValue; bool IsSectionRelocation; - bool Skip; if (!analyzeRelocation(Rel, RType, SymbolName, IsSectionRelocation, - SymbolAddress, Addend, ExtractedValue, Skip)) { + SymbolAddress, Addend, ExtractedValue)) { LLVM_DEBUG({ dbgs() << "BOLT-WARNING: failed to analyze relocation @ offset = " << formatv("{0:x}; type name = {1}\n", Rel.getOffset(), TypeName); @@ -2718,14 +2716,6 @@ void RewriteInstance::handleRelocation(const SectionRef &RelocatedSection, return; } - if (Skip) { - LLVM_DEBUG({ - dbgs() << "BOLT-DEBUG: skipping relocation @ offset = " - << formatv("{0:x}; type name = {1}\n", Rel.getOffset(), TypeName); - }); - return; - } - if (!IsFromCode && !IsWritable && (IsX86 || IsAArch64) && Relocation::isPCRelative(RType)) { BinaryData *BD = BC->getBinaryDataContainingAddress(Rel.getOffset()); diff --git a/bolt/test/AArch64/tls-desc-call.s b/bolt/test/AArch64/tls-desc-call.s new file mode 100644 index 0000000..0575380 --- /dev/null +++ b/bolt/test/AArch64/tls-desc-call.s @@ -0,0 +1,35 @@ +# RUN: %clang %cflags %s -o %t.so -fPIC -shared -Wl,-q +# RUN: llvm-bolt %t.so -o 
%t.bolt --debug-only=bolt 2>&1 | FileCheck %s + +# REQUIRES: asserts + +## Verify that R_AARCH64_TLSDESC_CALL relocations are ignored + +# CHECK-NOT: Relocation {{.*}} R_AARCH64_TLSDESC_CALL + + .text + .globl get_tls_var + .p2align 2 + .type get_tls_var,@function +get_tls_var: + .cfi_startproc + str x30, [sp, #-16]! + adrp x0, :tlsdesc:tls_var + ldr x1, [x0, :tlsdesc_lo12:tls_var] + add x0, x0, :tlsdesc_lo12:tls_var + .tlsdesccall tls_var + blr x1 + mrs x8, TPIDR_EL0 + ldr w0, [x8, x0] + ldr x30, [sp], #16 + ret + .size get_tls_var, .-get_tls_var + .cfi_endproc + + .type tls_var,@object + .section .tdata,"awT",@progbits + .globl tls_var + .p2align 2, 0x0 +tls_var: + .word 42 + .size tls_var, 4 diff --git a/clang-tools-extra/clang-apply-replacements/lib/Tooling/ApplyReplacements.cpp b/clang-tools-extra/clang-apply-replacements/lib/Tooling/ApplyReplacements.cpp index b895075..0ac8f71 100644 --- a/clang-tools-extra/clang-apply-replacements/lib/Tooling/ApplyReplacements.cpp +++ b/clang-tools-extra/clang-apply-replacements/lib/Tooling/ApplyReplacements.cpp @@ -142,7 +142,7 @@ groupReplacements(const TUReplacements &TUs, const TUDiagnostics &TUDs, // build directories, make them absolute immediately. 
SmallString<128> Path = R.getFilePath(); if (BuildDir) - llvm::sys::fs::make_absolute(*BuildDir, Path); + llvm::sys::path::make_absolute(*BuildDir, Path); else SM.getFileManager().makeAbsolutePath(Path); diff --git a/clang-tools-extra/clang-move/Move.cpp b/clang-tools-extra/clang-move/Move.cpp index 17f5971..519d3599 100644 --- a/clang-tools-extra/clang-move/Move.cpp +++ b/clang-tools-extra/clang-move/Move.cpp @@ -75,7 +75,7 @@ std::string MakeAbsolutePath(StringRef CurrentDir, StringRef Path) { return ""; llvm::SmallString<128> InitialDirectory(CurrentDir); llvm::SmallString<128> AbsolutePath(Path); - llvm::sys::fs::make_absolute(InitialDirectory, AbsolutePath); + llvm::sys::path::make_absolute(InitialDirectory, AbsolutePath); return CleanPath(std::move(AbsolutePath)); } diff --git a/clang-tools-extra/clangd/ConfigCompile.cpp b/clang-tools-extra/clangd/ConfigCompile.cpp index 962a48b..18e3180 100644 --- a/clang-tools-extra/clangd/ConfigCompile.cpp +++ b/clang-tools-extra/clangd/ConfigCompile.cpp @@ -131,7 +131,7 @@ struct FragmentCompiler { return std::nullopt; } llvm::SmallString<256> AbsPath = llvm::StringRef(*Path); - llvm::sys::fs::make_absolute(FragmentDirectory, AbsPath); + llvm::sys::path::make_absolute(FragmentDirectory, AbsPath); llvm::sys::path::native(AbsPath, Style); return AbsPath.str().str(); } diff --git a/clang-tools-extra/clangd/SystemIncludeExtractor.cpp b/clang-tools-extra/clangd/SystemIncludeExtractor.cpp index 106de1b..4a5cd3b 100644 --- a/clang-tools-extra/clangd/SystemIncludeExtractor.cpp +++ b/clang-tools-extra/clangd/SystemIncludeExtractor.cpp @@ -106,7 +106,7 @@ struct DriverArgs { // relative or absolute). 
if (llvm::any_of(Driver, [](char C) { return llvm::sys::path::is_separator(C); })) { - llvm::sys::fs::make_absolute(Cmd.Directory, Driver); + llvm::sys::path::make_absolute(Cmd.Directory, Driver); } this->Driver = Driver.str().str(); for (size_t I = 0, E = Cmd.CommandLine.size(); I < E; ++I) { diff --git a/clang-tools-extra/clangd/index/SymbolCollector.cpp b/clang-tools-extra/clangd/index/SymbolCollector.cpp index 6bdb108..39c479b 100644 --- a/clang-tools-extra/clangd/index/SymbolCollector.cpp +++ b/clang-tools-extra/clangd/index/SymbolCollector.cpp @@ -325,7 +325,7 @@ private: if (R.second) { llvm::SmallString<256> AbsPath = Path; if (!llvm::sys::path::is_absolute(AbsPath) && !FallbackDir.empty()) - llvm::sys::fs::make_absolute(FallbackDir, AbsPath); + llvm::sys::path::make_absolute(FallbackDir, AbsPath); assert(llvm::sys::path::is_absolute(AbsPath) && "If the VFS can't make paths absolute, a FallbackDir must be " "provided"); diff --git a/clang-tools-extra/clangd/tool/ClangdMain.cpp b/clang-tools-extra/clangd/tool/ClangdMain.cpp index 4de2f21..4a990f8 100644 --- a/clang-tools-extra/clangd/tool/ClangdMain.cpp +++ b/clang-tools-extra/clangd/tool/ClangdMain.cpp @@ -578,7 +578,7 @@ public: Body = Body.ltrim('/'); llvm::SmallString<16> Path(Body); path::native(Path); - fs::make_absolute(TestScheme::TestDir, Path); + path::make_absolute(TestScheme::TestDir, Path); return std::string(Path); } diff --git a/clang-tools-extra/include-cleaner/tool/IncludeCleaner.cpp b/clang-tools-extra/include-cleaner/tool/IncludeCleaner.cpp index 372ab5f..fefbfc3 100644 --- a/clang-tools-extra/include-cleaner/tool/IncludeCleaner.cpp +++ b/clang-tools-extra/include-cleaner/tool/IncludeCleaner.cpp @@ -344,7 +344,7 @@ mapInputsToAbsPaths(clang::tooling::CompilationDatabase &CDB, } for (const auto &Cmd : Cmds) { llvm::SmallString<256> CDBPath(Cmd.Filename); - llvm::sys::fs::make_absolute(Cmd.Directory, CDBPath); + llvm::sys::path::make_absolute(Cmd.Directory, CDBPath); 
CDBToAbsPaths[std::string(CDBPath)] = std::string(AbsPath); } } diff --git a/clang/CMakeLists.txt b/clang/CMakeLists.txt index 4eaa7128..e4cb1a3 100644 --- a/clang/CMakeLists.txt +++ b/clang/CMakeLists.txt @@ -754,11 +754,22 @@ if (CLANG_ENABLE_BOOTSTRAP) if(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED) add_dependencies(clang-bootstrap-deps llvm-profdata) set(PGO_OPT -DLLVM_PROFDATA=${LLVM_RUNTIME_OUTPUT_INTDIR}/llvm-profdata) + string(TOUPPER "${BOOTSTRAP_LLVM_BUILD_INSTRUMENTED}" BOOTSTRAP_LLVM_BUILD_INSTRUMENTED) + if (BOOTSTRAP_LLVM_BUILD_INSTRUMENTED STREQUAL "CSSPGO") + add_dependencies(clang-bootstrap-deps llvm-profgen) + list(APPEND PGO_OPT -DLLVM_PROFGEN=${LLVM_RUNTIME_OUTPUT_INTDIR}/llvm-profgen) + endif() endif() if(LLVM_BUILD_INSTRUMENTED) - add_dependencies(clang-bootstrap-deps generate-profdata) - set(PGO_OPT -DLLVM_PROFDATA_FILE=${CMAKE_CURRENT_BINARY_DIR}/utils/perf-training/clang.profdata) + string(TOUPPER "${LLVM_BUILD_INSTRUMENTED}" LLVM_BUILD_INSTRUMENTED) + if (LLVM_BUILD_INSTRUMENTED STREQUAL "CSSPGO") + add_dependencies(clang-bootstrap-deps generate-sprofdata) + set(PGO_OPT -DLLVM_SPROFDATA_FILE=${CMAKE_CURRENT_BINARY_DIR}/utils/perf-training/clang.sprofdata) + else() + add_dependencies(clang-bootstrap-deps generate-profdata) + set(PGO_OPT -DLLVM_PROFDATA_FILE=${CMAKE_CURRENT_BINARY_DIR}/utils/perf-training/clang.profdata) + endif() # Use the current tools for LTO instead of the instrumented ones list(APPEND _BOOTSTRAP_DEFAULT_PASSTHROUGH CMAKE_CXX_COMPILER diff --git a/clang/cmake/caches/BOLT-CSSPGO.cmake b/clang/cmake/caches/BOLT-CSSPGO.cmake new file mode 100644 index 0000000..b1c204a --- /dev/null +++ b/clang/cmake/caches/BOLT-CSSPGO.cmake @@ -0,0 +1,3 @@ +set(BOLT_PGO_CMAKE_CACHE "CSSPGO" CACHE STRING "") +set(BOOTSTRAP_CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING "") +include(${CMAKE_CURRENT_LIST_DIR}/BOLT-PGO.cmake) diff --git a/clang/cmake/caches/BOLT-PGO.cmake b/clang/cmake/caches/BOLT-PGO.cmake index 1a04ca9..cc9410f 100644 --- 
a/clang/cmake/caches/BOLT-PGO.cmake +++ b/clang/cmake/caches/BOLT-PGO.cmake @@ -1,3 +1,4 @@ +set(BOLT_PGO_CMAKE_CACHE "PGO" CACHE STRING "") set(LLVM_ENABLE_PROJECTS "bolt;clang;lld" CACHE STRING "") set(CLANG_BOOTSTRAP_TARGETS @@ -14,4 +15,4 @@ set(BOOTSTRAP_CLANG_BOOTSTRAP_TARGETS set(PGO_BUILD_CONFIGURATION ${CMAKE_CURRENT_LIST_DIR}/BOLT.cmake CACHE STRING "") -include(${CMAKE_CURRENT_LIST_DIR}/PGO.cmake) +include(${CMAKE_CURRENT_LIST_DIR}/${BOLT_PGO_CMAKE_CACHE}.cmake) diff --git a/clang/cmake/caches/CSSPGO.cmake b/clang/cmake/caches/CSSPGO.cmake new file mode 100644 index 0000000..59e08a6 --- /dev/null +++ b/clang/cmake/caches/CSSPGO.cmake @@ -0,0 +1,2 @@ +set(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED "CSSPGO" CACHE STRING "") +include(${CMAKE_CURRENT_LIST_DIR}/PGO.cmake) diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index 97799ae..b1ddfa0 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -246,6 +246,8 @@ Non-comprehensive list of changes in this release - ``__builtin_assume_dereferenceable`` now accepts non-constant size operands. +- Fixed a crash when the second argument to ``__builtin_assume_aligned`` was not constant (#GH161314) + New Compiler Flags ------------------ - New option ``-fno-sanitize-debug-trap-reasons`` added to disable emitting trap reasons into the debug info when compiling with trapping UBSan (e.g. ``-fsanitize-trap=undefined``). diff --git a/clang/include/clang/AST/CharUnits.h b/clang/include/clang/AST/CharUnits.h index c06354451..e570bfa 100644 --- a/clang/include/clang/AST/CharUnits.h +++ b/clang/include/clang/AST/CharUnits.h @@ -141,7 +141,7 @@ namespace clang { /// Among other things, this promises that /// self.alignTo(N) will just return self. bool isMultipleOf(CharUnits N) const { - return (*this % N) == 0; + return (*this % N) == CharUnits::Zero(); } // Arithmetic operators. 
@@ -165,8 +165,8 @@ namespace clang { CharUnits operator% (QuantityType N) const { return CharUnits(Quantity % N); } - QuantityType operator% (const CharUnits &Other) const { - return Quantity % Other.Quantity; + CharUnits operator%(const CharUnits &Other) const { + return CharUnits(Quantity % Other.Quantity); } CharUnits operator+ (const CharUnits &Other) const { return CharUnits(Quantity + Other.Quantity); diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h index d85d04d..406d79e 100644 --- a/clang/include/clang/AST/Decl.h +++ b/clang/include/clang/AST/Decl.h @@ -80,6 +80,7 @@ class TypeAliasTemplateDecl; class UnresolvedSetImpl; class VarTemplateDecl; enum class ImplicitParamKind; +struct UsualDeleteParams; // Holds a constraint expression along with a pack expansion index, if // expanded. @@ -2646,6 +2647,8 @@ public: bool isTypeAwareOperatorNewOrDelete() const; void setIsTypeAwareOperatorNewOrDelete(bool IsTypeAwareOperator = true); + UsualDeleteParams getUsualDeleteParams() const; + /// Compute the language linkage. LanguageLinkage getLanguageLinkage() const; diff --git a/clang/include/clang/AST/ExprCXX.h b/clang/include/clang/AST/ExprCXX.h index 5f16bac..d78c7b6 100644 --- a/clang/include/clang/AST/ExprCXX.h +++ b/clang/include/clang/AST/ExprCXX.h @@ -2342,6 +2342,14 @@ struct ImplicitDeallocationParameters { SizedDeallocationMode PassSize; }; +/// The parameters to pass to a usual operator delete. +struct UsualDeleteParams { + TypeAwareAllocationMode TypeAwareDelete = TypeAwareAllocationMode::No; + bool DestroyingDelete = false; + bool Size = false; + AlignedAllocationMode Alignment = AlignedAllocationMode::No; +}; + /// Represents a new-expression for memory allocation and constructor /// calls, e.g: "new CXXNewExpr(foo)". 
class CXXNewExpr final diff --git a/clang/include/clang/Basic/LangOptions.h b/clang/include/clang/Basic/LangOptions.h index a8943df..41595ec 100644 --- a/clang/include/clang/Basic/LangOptions.h +++ b/clang/include/clang/Basic/LangOptions.h @@ -549,8 +549,7 @@ public: bool CheckNew = false; /// The HLSL root signature version for dxil. - llvm::dxbc::RootSignatureVersion HLSLRootSigVer = - llvm::dxbc::RootSignatureVersion::V1_1; + llvm::dxbc::RootSignatureVersion HLSLRootSigVer; /// The HLSL root signature that will be used to overide the root signature /// used for the shader entry point. diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 3f83c30..8a5bf03 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -148,9 +148,10 @@ public: } mlir::Value createComplexReal(mlir::Location loc, mlir::Value operand) { - auto operandTy = mlir::cast<cir::ComplexType>(operand.getType()); - return cir::ComplexRealOp::create(*this, loc, operandTy.getElementType(), - operand); + auto resultType = operand.getType(); + if (auto complexResultType = mlir::dyn_cast<cir::ComplexType>(resultType)) + resultType = complexResultType.getElementType(); + return cir::ComplexRealOp::create(*this, loc, resultType, operand); } mlir::Value createComplexImag(mlir::Location loc, mlir::Value operand) { diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index f857cf8..0a78492 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3260,18 +3260,20 @@ def CIR_ComplexCreateOp : CIR_Op<"complex.create", [Pure, SameTypeOperands]> { def CIR_ComplexRealOp : CIR_Op<"complex.real", [Pure]> { let summary = "Extract the real part of a complex value"; let description = [{ - `cir.complex.real` operation takes an operand of `!cir.complex` type 
and - yields the real part of it. + `cir.complex.real` operation takes an operand of `!cir.complex`, `!cir.int` + or `!cir.float`. If the operand is `!cir.complex`, the real part of it will + be returned, otherwise the value returned unmodified. Example: ```mlir - %1 = cir.complex.real %0 : !cir.complex<!cir.float> -> !cir.float + %real = cir.complex.real %complex : !cir.complex<!cir.float> -> !cir.float + %real = cir.complex.real %scalar : !cir.float -> !cir.float ``` }]; let results = (outs CIR_AnyIntOrFloatType:$result); - let arguments = (ins CIR_ComplexType:$operand); + let arguments = (ins CIR_AnyComplexOrIntOrFloatType:$operand); let assemblyFormat = [{ $operand `:` qualified(type($operand)) `->` qualified(type($result)) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td b/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td index 82f6e1d..da03a29 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td @@ -165,6 +165,12 @@ def CIR_AnyIntOrFloatType : AnyTypeOf<[CIR_AnyFloatType, CIR_AnyIntType], def CIR_AnyComplexType : CIR_TypeBase<"::cir::ComplexType", "complex type">; +def CIR_AnyComplexOrIntOrFloatType : AnyTypeOf<[ + CIR_AnyComplexType, CIR_AnyFloatType, CIR_AnyIntType +], "complex, integer or floating point type"> { + let cppFunctionName = "isComplexOrIntegerOrFloatingPointType"; +} + //===----------------------------------------------------------------------===// // Array Type predicates //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 6245cf33..096df56 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -9473,7 +9473,7 @@ def target_profile : DXCJoinedOrSeparate<"T">, MetaVarName<"<profile>">, "lib_6_3, lib_6_4, lib_6_5, lib_6_6, lib_6_7, lib_6_x," "ms_6_5, ms_6_6, ms_6_7," 
"as_6_5, as_6_6, as_6_7," - "rootsig_1_0, rootsig_1_1">; + "rootsig_1_0, rootsig_1_1, rootsig_1_2">; def emit_pristine_llvm : DXCFlag<"emit-pristine-llvm">, HelpText<"Emit pristine LLVM IR from the frontend by not running any LLVM passes at all." "Same as -S + -emit-llvm + -disable-llvm-passes.">; @@ -9486,9 +9486,9 @@ def fdx_rootsignature_version : Group<dxc_Group>, Visibility<[ClangOption, CC1Option]>, HelpText<"Root Signature Version">, - Values<"rootsig_1_0,rootsig_1_1">, + Values<"rootsig_1_0,rootsig_1_1,rootsig_1_2">, NormalizedValuesScope<"llvm::dxbc::RootSignatureVersion">, - NormalizedValues<["V1_0", "V1_1"]>, + NormalizedValues<["V1_0", "V1_1", "V1_2"]>, MarshallingInfoEnum<LangOpts<"HLSLRootSigVer">, "V1_1">; def dxc_rootsig_ver : Separate<["/", "-"], "force-rootsig-ver">, diff --git a/clang/include/clang/Lex/HLSLRootSignatureTokenKinds.def b/clang/include/clang/Lex/HLSLRootSignatureTokenKinds.def index a5cfeb3..1d7f7ad 100644 --- a/clang/include/clang/Lex/HLSLRootSignatureTokenKinds.def +++ b/clang/include/clang/Lex/HLSLRootSignatureTokenKinds.def @@ -65,6 +65,9 @@ #ifndef STATIC_BORDER_COLOR_ENUM #define STATIC_BORDER_COLOR_ENUM(NAME, LIT) ENUM(NAME, LIT) #endif +#ifndef STATIC_SAMPLER_FLAG_ENUM +#define STATIC_SAMPLER_FLAG_ENUM(NAME, LIT) ENUM(NAME, LIT) +#endif // General Tokens: TOK(invalid, "invalid identifier") @@ -228,6 +231,10 @@ STATIC_BORDER_COLOR_ENUM(OpaqueWhite, "STATIC_BORDER_COLOR_OPAQUE_WHITE") STATIC_BORDER_COLOR_ENUM(OpaqueBlackUint, "STATIC_BORDER_COLOR_OPAQUE_BLACK_UINT") STATIC_BORDER_COLOR_ENUM(OpaqueWhiteUint, "STATIC_BORDER_COLOR_OPAQUE_WHITE_UINT") +// Root Descriptor Flag Enums: +STATIC_SAMPLER_FLAG_ENUM(UintBorderColor, "UINT_BORDER_COLOR") +STATIC_SAMPLER_FLAG_ENUM(NonNormalizedCoordinates, "NON_NORMALIZED_COORDINATES") + #undef STATIC_BORDER_COLOR_ENUM #undef COMPARISON_FUNC_ENUM #undef TEXTURE_ADDRESS_MODE_ENUM @@ -237,6 +244,7 @@ STATIC_BORDER_COLOR_ENUM(OpaqueWhiteUint, "STATIC_BORDER_COLOR_OPAQUE_WHITE_UINT #undef 
DESCRIPTOR_RANGE_FLAG_ENUM_OFF #undef DESCRIPTOR_RANGE_FLAG_ENUM_ON #undef ROOT_DESCRIPTOR_FLAG_ENUM +#undef STATIC_SAMPLER_FLAG_ENUM #undef ROOT_FLAG_ENUM #undef DESCRIPTOR_RANGE_OFFSET_ENUM #undef UNBOUNDED_ENUM diff --git a/clang/include/clang/Parse/ParseHLSLRootSignature.h b/clang/include/clang/Parse/ParseHLSLRootSignature.h index b06846f..8f91d7c 100644 --- a/clang/include/clang/Parse/ParseHLSLRootSignature.h +++ b/clang/include/clang/Parse/ParseHLSLRootSignature.h @@ -130,6 +130,7 @@ private: std::optional<float> MaxLOD; std::optional<uint32_t> Space; std::optional<llvm::dxbc::ShaderVisibility> Visibility; + std::optional<llvm::dxbc::StaticSamplerFlags> Flags; }; std::optional<ParsedStaticSamplerParams> parseStaticSamplerParams(); @@ -153,6 +154,8 @@ private: parseRootDescriptorFlags(RootSignatureToken::Kind Context); std::optional<llvm::dxbc::DescriptorRangeFlags> parseDescriptorRangeFlags(RootSignatureToken::Kind Context); + std::optional<llvm::dxbc::StaticSamplerFlags> + parseStaticSamplerFlags(RootSignatureToken::Kind Context); /// Use NumericLiteralParser to convert CurToken.NumSpelling into a unsigned /// 32-bit integer diff --git a/clang/lib/AST/APValue.cpp b/clang/lib/AST/APValue.cpp index 7173c2a..2e1c8eb 100644 --- a/clang/lib/AST/APValue.cpp +++ b/clang/lib/AST/APValue.cpp @@ -784,7 +784,7 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy, if (!O.isZero()) { if (IsReference) Out << "*("; - if (S.isZero() || O % S) { + if (S.isZero() || !O.isMultipleOf(S)) { Out << "(char*)"; S = CharUnits::One(); } diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp index cd8e495..c734155 100644 --- a/clang/lib/AST/Decl.cpp +++ b/clang/lib/AST/Decl.cpp @@ -3552,6 +3552,53 @@ void FunctionDecl::setIsTypeAwareOperatorNewOrDelete(bool IsTypeAware) { getASTContext().setIsTypeAwareOperatorNewOrDelete(this, IsTypeAware); } +UsualDeleteParams FunctionDecl::getUsualDeleteParams() const { + UsualDeleteParams Params; + + // This function 
should only be called for operator delete declarations. + assert(getDeclName().isAnyOperatorDelete()); + if (!getDeclName().isAnyOperatorDelete()) + return Params; + + const FunctionProtoType *FPT = getType()->castAs<FunctionProtoType>(); + auto AI = FPT->param_type_begin(), AE = FPT->param_type_end(); + + if (isTypeAwareOperatorNewOrDelete()) { + Params.TypeAwareDelete = TypeAwareAllocationMode::Yes; + assert(AI != AE); + ++AI; + } + + // The first argument after the type-identity parameter (if any) is + // always a void* (or C* for a destroying operator delete for class + // type C). + ++AI; + + // The next parameter may be a std::destroying_delete_t. + if (isDestroyingOperatorDelete()) { + assert(!isTypeAwareAllocation(Params.TypeAwareDelete)); + Params.DestroyingDelete = true; + assert(AI != AE); + ++AI; + } + + // Figure out what other parameters we should be implicitly passing. + if (AI != AE && (*AI)->isIntegerType()) { + Params.Size = true; + ++AI; + } else + assert(!isTypeAwareAllocation(Params.TypeAwareDelete)); + + if (AI != AE && (*AI)->isAlignValT()) { + Params.Alignment = AlignedAllocationMode::Yes; + ++AI; + } else + assert(!isTypeAwareAllocation(Params.TypeAwareDelete)); + + assert(AI == AE && "unexpected usual deallocation function parameter"); + return Params; +} + LanguageLinkage FunctionDecl::getLanguageLinkage() const { return getDeclLanguageLinkage(*this); } diff --git a/clang/lib/AST/RecordLayoutBuilder.cpp b/clang/lib/AST/RecordLayoutBuilder.cpp index 43f4e07..00b938b 100644 --- a/clang/lib/AST/RecordLayoutBuilder.cpp +++ b/clang/lib/AST/RecordLayoutBuilder.cpp @@ -2087,9 +2087,8 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, if (InsertExtraPadding) { CharUnits ASanAlignment = CharUnits::fromQuantity(8); CharUnits ExtraSizeForAsan = ASanAlignment; - if (FieldSize % ASanAlignment) - ExtraSizeForAsan += - ASanAlignment - CharUnits::fromQuantity(FieldSize % ASanAlignment); + if (!FieldSize.isMultipleOf(ASanAlignment)) + 
ExtraSizeForAsan += ASanAlignment - (FieldSize % ASanAlignment); EffectiveFieldSize = FieldSize = FieldSize + ExtraSizeForAsan; } @@ -2119,10 +2118,10 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, if (RD->hasAttr<PackedAttr>() || !MaxFieldAlignment.isZero()) if (FieldAlign < OriginalFieldAlign) if (D->getType()->isRecordType()) { - // If the offset is a multiple of the alignment of + // If the offset is not a multiple of the alignment of // the type, raise the warning. // TODO: Takes no account the alignment of the outer struct - if (FieldOffset % OriginalFieldAlign != 0) + if (!FieldOffset.isMultipleOf(OriginalFieldAlign)) Diag(D->getLocation(), diag::warn_unaligned_access) << Context.getCanonicalTagType(RD) << D->getName() << D->getType(); diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp index 8f7fe3b..cf5e914 100644 --- a/clang/lib/AST/TextNodeDumper.cpp +++ b/clang/lib/AST/TextNodeDumper.cpp @@ -3095,6 +3095,9 @@ void TextNodeDumper::VisitHLSLRootSignatureDecl( case llvm::dxbc::RootSignatureVersion::V1_1: OS << "1.1"; break; + case llvm::dxbc::RootSignatureVersion::V1_2: + OS << "1.2"; + break; } OS << ", "; llvm::hlsl::rootsig::dumpRootElements(OS, D->getRootElements()); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp index af42d1d..1e987f3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp @@ -133,8 +133,7 @@ public: } void VisitParenExpr(ParenExpr *pe) { Visit(pe->getSubExpr()); } void VisitGenericSelectionExpr(GenericSelectionExpr *ge) { - cgf.cgm.errorNYI(ge->getSourceRange(), - "AggExprEmitter: VisitGenericSelectionExpr"); + Visit(ge->getResultExpr()); } void VisitCoawaitExpr(CoawaitExpr *e) { cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoawaitExpr"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 83208bf..7989ad2 
100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -210,60 +210,6 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorCall( return emitCall(fnInfo, callee, returnValue, args, nullptr, loc); } -namespace { -/// The parameters to pass to a usual operator delete. -struct UsualDeleteParams { - TypeAwareAllocationMode typeAwareDelete = TypeAwareAllocationMode::No; - bool destroyingDelete = false; - bool size = false; - AlignedAllocationMode alignment = AlignedAllocationMode::No; -}; -} // namespace - -// FIXME(cir): this should be shared with LLVM codegen -static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *fd) { - UsualDeleteParams params; - - const FunctionProtoType *fpt = fd->getType()->castAs<FunctionProtoType>(); - auto ai = fpt->param_type_begin(), ae = fpt->param_type_end(); - - if (fd->isTypeAwareOperatorNewOrDelete()) { - params.typeAwareDelete = TypeAwareAllocationMode::Yes; - assert(ai != ae); - ++ai; - } - - // The first argument after the type-identity parameter (if any) is - // always a void* (or C* for a destroying operator delete for class - // type C). - ++ai; - - // The next parameter may be a std::destroying_delete_t. - if (fd->isDestroyingOperatorDelete()) { - params.destroyingDelete = true; - assert(ai != ae); - ++ai; - } - - // Figure out what other parameters we should be implicitly passing. 
- if (ai != ae && (*ai)->isIntegerType()) { - params.size = true; - ++ai; - } else { - assert(!isTypeAwareAllocation(params.typeAwareDelete)); - } - - if (ai != ae && (*ai)->isAlignValT()) { - params.alignment = AlignedAllocationMode::Yes; - ++ai; - } else { - assert(!isTypeAwareAllocation(params.typeAwareDelete)); - } - - assert(ai == ae && "unexpected usual deallocation function parameter"); - return params; -} - static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e, unsigned minElements, mlir::Value &numElements, @@ -616,11 +562,11 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD, const auto *deleteFTy = deleteFD->getType()->castAs<FunctionProtoType>(); CallArgList deleteArgs; - UsualDeleteParams params = getUsualDeleteParams(deleteFD); + UsualDeleteParams params = deleteFD->getUsualDeleteParams(); auto paramTypeIt = deleteFTy->param_type_begin(); // Pass std::type_identity tag if present - if (isTypeAwareAllocation(params.typeAwareDelete)) + if (isTypeAwareAllocation(params.TypeAwareDelete)) cgm.errorNYI(deleteFD->getSourceRange(), "emitDeleteCall: type aware delete"); @@ -631,12 +577,12 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD, deleteArgs.add(RValue::get(deletePtr), argTy); // Pass the std::destroying_delete tag if present. - if (params.destroyingDelete) + if (params.DestroyingDelete) cgm.errorNYI(deleteFD->getSourceRange(), "emitDeleteCall: destroying delete"); // Pass the size if the delete function has a size_t parameter. - if (params.size) { + if (params.Size) { QualType sizeType = *paramTypeIt++; CharUnits deleteTypeSize = getContext().getTypeSizeInChars(deleteTy); assert(mlir::isa<cir::IntType>(convertType(sizeType)) && @@ -648,7 +594,7 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD, } // Pass the alignment if the delete function has an align_val_t parameter. 
- if (isAlignedAllocation(params.alignment)) + if (isAlignedAllocation(params.Alignment)) cgm.errorNYI(deleteFD->getSourceRange(), "emitDeleteCall: aligned allocation"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index f4bbced..500007f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -2151,8 +2151,10 @@ mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e, } if (e->getOpcode() == UO_Real) { - return promotionTy.isNull() ? Visit(op) - : cgf.emitPromotedScalarExpr(op, promotionTy); + mlir::Value operand = promotionTy.isNull() + ? Visit(op) + : cgf.emitPromotedScalarExpr(op, promotionTy); + return builder.createComplexReal(loc, operand); } // __imag on a scalar returns zero. Emit the subexpr to ensure side diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index a404c0c..b26b4f2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -836,6 +836,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) { return emitCallExprLValue(cast<CallExpr>(e)); case Expr::ParenExprClass: return emitLValue(cast<ParenExpr>(e)->getSubExpr()); + case Expr::GenericSelectionExprClass: + return emitLValue(cast<GenericSelectionExpr>(e)->getResultExpr()); case Expr::DeclRefExprClass: return emitDeclRefLValue(cast<DeclRefExpr>(e)); case Expr::CStyleCastExprClass: diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp index 7f9350a..a9af753 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp @@ -62,7 +62,7 @@ mlir::Value CIRGenFunction::createOpenACCConstantInt(mlir::Location loc, auto constOp = builder.create<mlir::arith::ConstantOp>( loc, builder.getIntegerAttr(ty, value)); - return constOp.getResult(); + return constOp; } CIRGenFunction::OpenACCDataOperandInfo diff --git 
a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp index 3cf0534..f22f3e8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp @@ -110,7 +110,7 @@ class OpenACCClauseCIREmitter final auto constOp = builder.create<mlir::arith::ConstantOp>( loc, builder.getIntegerAttr(ty, value)); - return constOp.getResult(); + return constOp; } mlir::Value createConstantInt(SourceLocation loc, unsigned width, @@ -230,13 +230,13 @@ class OpenACCClauseCIREmitter final std::is_same_v<AfterOpTy, mlir::acc::DetachOp>) { // Detach/Delete ops don't have the variable reference here, so they // take 1 fewer argument to their build function. - afterOp = builder.create<AfterOpTy>( - opInfo.beginLoc, beforeOp.getResult(), structured, implicit, - opInfo.name, opInfo.bounds); + afterOp = + builder.create<AfterOpTy>(opInfo.beginLoc, beforeOp, structured, + implicit, opInfo.name, opInfo.bounds); } else { afterOp = builder.create<AfterOpTy>( - opInfo.beginLoc, beforeOp.getResult(), opInfo.varValue, structured, - implicit, opInfo.name, opInfo.bounds); + opInfo.beginLoc, beforeOp, opInfo.varValue, structured, implicit, + opInfo.name, opInfo.bounds); } } @@ -1005,7 +1005,7 @@ public: /*temporary=*/nullptr, OpenACCReductionOperator::Invalid, Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType, opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType, - privateOp.getResult()); + privateOp); // TODO: OpenACC: The dialect is going to change in the near future to // have these be on a different operation, so when that changes, we // probably need to change these here. 
@@ -1053,7 +1053,7 @@ public: OpenACCReductionOperator::Invalid, Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType, opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType, - firstPrivateOp.getResult()); + firstPrivateOp); // TODO: OpenACC: The dialect is going to change in the near future to // have these be on a different operation, so when that changes, we @@ -1101,7 +1101,7 @@ public: /*temporary=*/nullptr, clause.getReductionOp(), Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType, opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType, - reductionOp.getResult()); + reductionOp); operation.addReduction(builder.getContext(), reductionOp, recipe); } diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp index fc28ac5..e603884 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp @@ -36,6 +36,75 @@ mlir::Block *OpenACCRecipeBuilderBase::createRecipeBlock(mlir::Region ®ion, llvm::SmallVector<mlir::Location> locs{types.size(), loc}; return builder.createBlock(®ion, region.end(), types, locs); } +void OpenACCRecipeBuilderBase::makeAllocaCopy(mlir::Location loc, + mlir::Type copyType, + mlir::Value numEltsToCopy, + mlir::Value offsetPerSubarray, + mlir::Value destAlloca, + mlir::Value srcAlloca) { + mlir::OpBuilder::InsertionGuard guardCase(builder); + + mlir::Type itrTy = cgf.cgm.convertType(cgf.getContext().UnsignedLongLongTy); + auto itrPtrTy = cir::PointerType::get(itrTy); + mlir::IntegerAttr itrAlign = + cgf.cgm.getSize(cgf.getContext().getTypeAlignInChars( + cgf.getContext().UnsignedLongLongTy)); + + auto loopBuilder = [&]() { + auto itr = + cir::AllocaOp::create(builder, loc, itrPtrTy, itrTy, "itr", itrAlign); + cir::ConstantOp constZero = builder.getConstInt(loc, itrTy, 0); + builder.CIRBaseBuilderTy::createStore(loc, constZero, itr); + builder.createFor( + loc, + /*condBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // 
itr < numEltsToCopy + // Enforce a trip count of 1 if there wasn't any element count, this + // way we can just use this loop with a constant bounds instead of a + // separate code path. + if (!numEltsToCopy) + numEltsToCopy = builder.getConstInt(loc, itrTy, 1); + + auto loadCur = cir::LoadOp::create(builder, loc, {itr}); + auto cmp = builder.createCompare(loc, cir::CmpOpKind::lt, loadCur, + numEltsToCopy); + builder.createCondition(cmp); + }, + /*bodyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // destAlloca[itr] = srcAlloca[offsetPerSubArray * itr]; + auto loadCur = cir::LoadOp::create(builder, loc, {itr}); + auto srcOffset = builder.createMul(loc, offsetPerSubarray, loadCur); + + auto ptrToOffsetIntoSrc = cir::PtrStrideOp::create( + builder, loc, copyType, srcAlloca, srcOffset); + + auto offsetIntoDecayDest = cir::PtrStrideOp::create( + builder, loc, builder.getPointerTo(copyType), destAlloca, + loadCur); + + builder.CIRBaseBuilderTy::createStore(loc, ptrToOffsetIntoSrc, + offsetIntoDecayDest); + builder.createYield(loc); + }, + /*stepBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // Simple increment of the iterator. + auto load = cir::LoadOp::create(builder, loc, {itr}); + auto inc = cir::UnaryOp::create(builder, loc, load.getType(), + cir::UnaryOpKind::Inc, load); + builder.CIRBaseBuilderTy::createStore(loc, inc, itr); + builder.createYield(loc); + }); + }; + + cir::ScopeOp::create(builder, loc, + [&](mlir::OpBuilder &b, mlir::Location loc) { + loopBuilder(); + builder.createYield(loc); + }); +} mlir::Value OpenACCRecipeBuilderBase::makeBoundsAlloca( mlir::Block *block, SourceRange exprRange, mlir::Location loc, @@ -78,6 +147,10 @@ mlir::Value OpenACCRecipeBuilderBase::makeBoundsAlloca( bool lastBoundWasArray = isArrayTy(boundTypes.back()); + // Make sure we track a moving version of this so we can get our + // 'copying' back to correct. 
+ mlir::Value lastAlloca = initialAlloca; + // Since we're iterating the types in reverse, this sets up for each index // corresponding to the boundsRange to be the 'after application of the // bounds. @@ -125,14 +198,21 @@ mlir::Value OpenACCRecipeBuilderBase::makeBoundsAlloca( mlir::Type eltTy = cgf.convertType(resultType); cir::PointerType ptrTy = builder.getPointerTo(eltTy); - builder.createAlloca(loc, ptrTy, eltTy, "openacc.init.bounds", - cgf.getContext().getTypeAlignInChars(resultType), - curSize); - - // TODO: OpenACC : At this point we should be copying the addresses of - // each element of this to the last allocation. At the moment, that is - // not yet implemented. - cgf.cgm.errorNYI(exprRange, "OpenACC recipe alloca copying"); + mlir::Value curAlloca = builder.createAlloca( + loc, ptrTy, eltTy, "openacc.init.bounds", + cgf.getContext().getTypeAlignInChars(resultType), curSize); + + makeAllocaCopy(loc, ptrTy, cumulativeElts, eltsPerSubArray, lastAlloca, + curAlloca); + lastAlloca = curAlloca; + } else { + // In the case of an array, we just need to decay the pointer, so just do + // a zero-offset stride on the last alloca to decay it down an array + // level. 
+ cir::ConstantOp constZero = builder.getConstInt(loc, itrTy, 0); + lastAlloca = builder.getArrayElement(loc, loc, lastAlloca, + cgf.convertType(resultType), + constZero, /*shouldDecay=*/true); } cumulativeElts = eltsToAlloca; @@ -160,7 +240,7 @@ OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue, if (auto arrayTy = dyn_cast<cir::ArrayType>(eltTy)) return builder.getArrayElement(loc, loc, subVal, arrayTy.getElementType(), - idxLoad.getResult(), + idxLoad, /*shouldDecay=*/true); assert(isa<cir::PointerType>(eltTy)); @@ -168,8 +248,8 @@ OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue, auto eltLoad = cir::LoadOp::create(builder, loc, {subVal}); return cir::PtrStrideOp::create(builder, loc, eltLoad.getType(), eltLoad, - idxLoad.getResult()) - .getResult(); + idxLoad); + }; auto forStmtBuilder = [&]() { @@ -191,12 +271,11 @@ OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue, if (inverse) { cir::ConstantOp constOne = builder.getConstInt(loc, itrTy, 1); - auto sub = - cir::BinOp::create(builder, loc, itrTy, cir::BinOpKind::Sub, - ubConversion.getResult(0), constOne.getResult()); + auto sub = cir::BinOp::create(builder, loc, itrTy, cir::BinOpKind::Sub, + ubConversion.getResult(0), constOne); // Upperbound is exclusive, so subtract 1. - builder.CIRBaseBuilderTy::createStore(loc, sub.getResult(), itr); + builder.CIRBaseBuilderTy::createStore(loc, sub, itr); } else { // Lowerbound is inclusive, so we can include it. builder.CIRBaseBuilderTy::createStore(loc, lbConversion.getResult(0), @@ -214,8 +293,8 @@ OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue, auto loadCur = cir::LoadOp::create(builder, loc, {itr}); // Use 'not equal' since we are just doing an increment/decrement. auto cmp = builder.createCompare( - loc, inverse ? cir::CmpOpKind::ge : cir::CmpOpKind::lt, - loadCur.getResult(), endItr.getResult(0)); + loc, inverse ? 
cir::CmpOpKind::ge : cir::CmpOpKind::lt, loadCur, + endItr.getResult(0)); builder.createCondition(cmp); }, /*bodyBuilder=*/ @@ -229,11 +308,10 @@ OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { auto load = cir::LoadOp::create(builder, loc, {itr}); - auto unary = cir::UnaryOp::create(builder, loc, load.getType(), - inverse ? cir::UnaryOpKind::Dec - : cir::UnaryOpKind::Inc, - load.getResult()); - builder.CIRBaseBuilderTy::createStore(loc, unary.getResult(), itr); + auto unary = cir::UnaryOp::create( + builder, loc, load.getType(), + inverse ? cir::UnaryOpKind::Dec : cir::UnaryOpKind::Inc, load); + builder.CIRBaseBuilderTy::createStore(loc, unary, itr); builder.createYield(loc); }); }; diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h index acd187b..d802ccb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h +++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h @@ -24,6 +24,10 @@ namespace clang::CIRGen { class OpenACCRecipeBuilderBase { + // makes the copy of the addresses of an alloca to the previous allocation. + void makeAllocaCopy(mlir::Location loc, mlir::Type copyType, + mlir::Value numEltsToCopy, mlir::Value offsetPerSubarray, + mlir::Value destAlloca, mlir::Value srcAlloca); // This function generates the required alloca, similar to // 'emitAutoVarAlloca', except for the OpenACC array/pointer types. 
mlir::Value makeBoundsAlloca(mlir::Block *block, SourceRange exprRange, diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp index a762881..2baeb43 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp @@ -615,7 +615,7 @@ void CIRRecordLowering::determinePacked(bool nvBaseType) { continue; // If any member falls at an offset that it not a multiple of its alignment, // then the entire record must be packed. - if (member.offset % getAlignment(member.data)) + if (!member.offset.isMultipleOf(getAlignment(member.data))) packed = true; if (member.offset < nvSize) nvAlignment = std::max(nvAlignment, getAlignment(member.data)); @@ -623,12 +623,12 @@ void CIRRecordLowering::determinePacked(bool nvBaseType) { } // If the size of the record (the capstone's offset) is not a multiple of the // record's alignment, it must be packed. - if (members.back().offset % alignment) + if (!members.back().offset.isMultipleOf(alignment)) packed = true; // If the non-virtual sub-object is not a multiple of the non-virtual // sub-object's alignment, it must be packed. We cannot have a packed // non-virtual sub-object and an unpacked complete object or vise versa. - if (nvSize % nvAlignment) + if (!nvSize.isMultipleOf(nvAlignment)) packed = true; // Update the alignment of the sentinel. if (!packed) @@ -824,7 +824,7 @@ void CIRRecordLowering::lowerUnion() { appendPaddingBytes(layoutSize - getSize(storageType)); // Set packed if we need it. 
- if (layoutSize % getAlignment(storageType)) + if (!layoutSize.isMultipleOf(getAlignment(storageType))) packed = true; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index fb87036..6b5cc80 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2388,14 +2388,23 @@ OpFoldResult cir::ComplexCreateOp::fold(FoldAdaptor adaptor) { //===----------------------------------------------------------------------===// LogicalResult cir::ComplexRealOp::verify() { - if (getType() != getOperand().getType().getElementType()) { + mlir::Type operandTy = getOperand().getType(); + if (auto complexOperandTy = mlir::dyn_cast<cir::ComplexType>(operandTy)) { + operandTy = complexOperandTy.getElementType(); + } + + if (getType() != operandTy) { emitOpError() << ": result type does not match operand type"; return failure(); } + return success(); } OpFoldResult cir::ComplexRealOp::fold(FoldAdaptor adaptor) { + if (!mlir::isa<cir::ComplexType>(getOperand().getType())) + return nullptr; + if (auto complexCreateOp = getOperand().getDefiningOp<cir::ComplexCreateOp>()) return complexCreateOp.getOperand(0); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 22f069d..4bc7783 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2999,8 +2999,13 @@ mlir::LogicalResult CIRToLLVMComplexRealOpLowering::matchAndRewrite( cir::ComplexRealOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { mlir::Type resultLLVMTy = getTypeConverter()->convertType(op.getType()); - rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( - op, resultLLVMTy, adaptor.getOperand(), llvm::ArrayRef<std::int64_t>{0}); + mlir::Value operand = adaptor.getOperand(); + if (mlir::isa<cir::ComplexType>(op.getOperand().getType())) { + operand = 
mlir::LLVM::ExtractValueOp::create( + rewriter, op.getLoc(), resultLLVMTy, operand, + llvm::ArrayRef<std::int64_t>{0}); + } + rewriter.replaceOp(op, operand); return mlir::success(); } diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp index eeb0fd6..4a3446a 100644 --- a/clang/lib/CodeGen/CGAtomic.cpp +++ b/clang/lib/CodeGen/CGAtomic.cpp @@ -880,7 +880,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) { CharUnits MaxInlineWidth = getContext().toCharUnitsFromBits(MaxInlineWidthInBits); DiagnosticsEngine &Diags = CGM.getDiags(); - bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0; + bool Misaligned = !Ptr.getAlignment().isMultipleOf(TInfo.Width); bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits; if (Misaligned) { Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned) diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp index a092b71..c52526c 100644 --- a/clang/lib/CodeGen/CGExprCXX.cpp +++ b/clang/lib/CodeGen/CGExprCXX.cpp @@ -1377,58 +1377,6 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, } namespace { -/// The parameters to pass to a usual operator delete. -struct UsualDeleteParams { - TypeAwareAllocationMode TypeAwareDelete = TypeAwareAllocationMode::No; - bool DestroyingDelete = false; - bool Size = false; - AlignedAllocationMode Alignment = AlignedAllocationMode::No; -}; -} - -static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) { - UsualDeleteParams Params; - - const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>(); - auto AI = FPT->param_type_begin(), AE = FPT->param_type_end(); - - if (FD->isTypeAwareOperatorNewOrDelete()) { - Params.TypeAwareDelete = TypeAwareAllocationMode::Yes; - assert(AI != AE); - ++AI; - } - - // The first argument after the type-identity parameter (if any) is - // always a void* (or C* for a destroying operator delete for class - // type C). 
- ++AI; - - // The next parameter may be a std::destroying_delete_t. - if (FD->isDestroyingOperatorDelete()) { - assert(!isTypeAwareAllocation(Params.TypeAwareDelete)); - Params.DestroyingDelete = true; - assert(AI != AE); - ++AI; - } - - // Figure out what other parameters we should be implicitly passing. - if (AI != AE && (*AI)->isIntegerType()) { - Params.Size = true; - ++AI; - } else - assert(!isTypeAwareAllocation(Params.TypeAwareDelete)); - - if (AI != AE && (*AI)->isAlignValT()) { - Params.Alignment = AlignedAllocationMode::Yes; - ++AI; - } else - assert(!isTypeAwareAllocation(Params.TypeAwareDelete)); - - assert(AI == AE && "unexpected usual deallocation function parameter"); - return Params; -} - -namespace { /// A cleanup to call the given 'operator delete' function upon abnormal /// exit from a new expression. Templated on a traits type that deals with /// ensuring that the arguments dominate the cleanup if necessary. @@ -1505,7 +1453,7 @@ namespace { } else { // For a non-placement new-expression, 'operator delete' can take a // size and/or an alignment if it has the right parameters. - Params = getUsualDeleteParams(OperatorDelete); + Params = OperatorDelete->getUsualDeleteParams(); } assert(!Params.DestroyingDelete && @@ -1838,7 +1786,7 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD, const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>(); CallArgList DeleteArgs; - auto Params = getUsualDeleteParams(DeleteFD); + auto Params = DeleteFD->getUsualDeleteParams(); auto ParamTypeIt = DeleteFTy->param_type_begin(); std::optional<llvm::AllocaInst *> TagAlloca; diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp index b44dd9e..6407afc 100644 --- a/clang/lib/CodeGen/CGExprConstant.cpp +++ b/clang/lib/CodeGen/CGExprConstant.cpp @@ -433,7 +433,7 @@ llvm::Constant *ConstantAggregateBuilder::buildFrom( // All remaining elements must be the same type. 
if (Elems[I]->getType() != CommonType || - Offset(I) % ElemSize != 0) { + !Offset(I).isMultipleOf(ElemSize)) { CanEmitArray = false; break; } diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp index 60f30a1..dbcce9b 100644 --- a/clang/lib/CodeGen/CGObjCMac.cpp +++ b/clang/lib/CodeGen/CGObjCMac.cpp @@ -5367,7 +5367,7 @@ IvarLayoutBuilder::buildBitmap(CGObjCCommonMac &CGObjC, // Ignore scan requests that don't start at an even multiple of the // word size. We can't encode them. - if ((beginOfScan % WordSize) != 0) + if (!beginOfScan.isMultipleOf(WordSize)) continue; // Ignore scan requests that start before the instance start. diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp index 75bde3f..8cda583 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp @@ -1542,7 +1542,7 @@ static llvm::TargetRegionEntryInfo getEntryInfoFromPresumedLoc( SourceManager &SM = CGM.getContext().getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(BeginLoc); - if (CGM.getFileSystem()->exists(PLoc.getFilename())) + if (!CGM.getFileSystem()->exists(PLoc.getFilename())) PLoc = SM.getPresumedLoc(BeginLoc, /*UseLineDirectives=*/false); return std::pair<std::string, uint64_t>(PLoc.getFilename(), PLoc.getLine()); diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp index 5f6136c..e9205c6 100644 --- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp +++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp @@ -369,11 +369,11 @@ void CGRecordLowering::lowerUnion(bool isNonVirtualBaseType) { appendPaddingBytes(LayoutSize - getSize(StorageType)); // Set packed if we need it. 
const auto StorageAlignment = getAlignment(StorageType); - assert((Layout.getSize() % StorageAlignment == 0 || - Layout.getDataSize() % StorageAlignment) && + assert((Layout.getSize().isMultipleOf(StorageAlignment) || + !Layout.getDataSize().isMultipleOf(StorageAlignment)) && "Union's standard layout and no_unique_address layout must agree on " "packedness"); - if (Layout.getDataSize() % StorageAlignment) + if (!Layout.getDataSize().isMultipleOf(StorageAlignment)) Packed = true; } @@ -977,7 +977,7 @@ void CGRecordLowering::determinePacked(bool NVBaseType) { continue; // If any member falls at an offset that it not a multiple of its alignment, // then the entire record must be packed. - if (Member.Offset % getAlignment(Member.Data)) + if (!Member.Offset.isMultipleOf(getAlignment(Member.Data))) Packed = true; if (Member.Offset < NVSize) NVAlignment = std::max(NVAlignment, getAlignment(Member.Data)); @@ -985,12 +985,12 @@ void CGRecordLowering::determinePacked(bool NVBaseType) { } // If the size of the record (the capstone's offset) is not a multiple of the // record's alignment, it must be packed. - if (Members.back().Offset % Alignment) + if (!Members.back().Offset.isMultipleOf(Alignment)) Packed = true; // If the non-virtual sub-object is not a multiple of the non-virtual // sub-object's alignment, it must be packed. We cannot have a packed // non-virtual sub-object and an unpacked complete object or vise versa. - if (NVSize % NVAlignment) + if (!NVSize.isMultipleOf(NVAlignment)) Packed = true; // Update the alignment of the sentinel. 
if (!Packed) diff --git a/clang/lib/Driver/ToolChains/HLSL.cpp b/clang/lib/Driver/ToolChains/HLSL.cpp index f4858e4..2869549 100644 --- a/clang/lib/Driver/ToolChains/HLSL.cpp +++ b/clang/lib/Driver/ToolChains/HLSL.cpp @@ -64,7 +64,7 @@ bool isLegalShaderModel(Triple &T) { } break; case Triple::EnvironmentType::RootSignature: VersionTuple MinVer(1, 0); - VersionTuple MaxVer(1, 1); + VersionTuple MaxVer(1, 2); return MinVer <= Version && Version <= MaxVer; } return false; diff --git a/clang/lib/Lex/HeaderSearch.cpp b/clang/lib/Lex/HeaderSearch.cpp index ae09f70..238c5e2 100644 --- a/clang/lib/Lex/HeaderSearch.cpp +++ b/clang/lib/Lex/HeaderSearch.cpp @@ -2077,7 +2077,7 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics( llvm::SmallString<32> FilePath = File; if (!WorkingDir.empty() && !path::is_absolute(FilePath)) - fs::make_absolute(WorkingDir, FilePath); + path::make_absolute(WorkingDir, FilePath); // remove_dots switches to backslashes on windows as a side-effect! // We always want to suggest forward slashes for includes. // (not remove_dots(..., posix) as that misparses windows paths). @@ -2091,7 +2091,7 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics( // `BestPrefixLength` accordingly. 
auto CheckDir = [&](llvm::SmallString<32> Dir) -> bool { if (!WorkingDir.empty() && !path::is_absolute(Dir)) - fs::make_absolute(WorkingDir, Dir); + path::make_absolute(WorkingDir, Dir); path::remove_dots(Dir, /*remove_dot_dot=*/true); for (auto NI = path::begin(File), NE = path::end(File), DI = path::begin(Dir), DE = path::end(Dir); diff --git a/clang/lib/Parse/ParseHLSLRootSignature.cpp b/clang/lib/Parse/ParseHLSLRootSignature.cpp index 3b16efb..7be6eec 100644 --- a/clang/lib/Parse/ParseHLSLRootSignature.cpp +++ b/clang/lib/Parse/ParseHLSLRootSignature.cpp @@ -485,6 +485,9 @@ std::optional<StaticSampler> RootSignatureParser::parseStaticSampler() { if (Params->Visibility.has_value()) Sampler.Visibility = Params->Visibility.value(); + if (Params->Flags.has_value()) + Sampler.Flags = Params->Flags.value(); + return Sampler; } @@ -926,6 +929,20 @@ RootSignatureParser::parseStaticSamplerParams() { if (!Visibility.has_value()) return std::nullopt; Params.Visibility = Visibility; + } else if (tryConsumeExpectedToken(TokenKind::kw_flags)) { + // `flags` `=` STATIC_SAMPLE_FLAGS + if (Params.Flags.has_value()) { + reportDiag(diag::err_hlsl_rootsig_repeat_param) << CurToken.TokKind; + return std::nullopt; + } + + if (consumeExpectedToken(TokenKind::pu_equal)) + return std::nullopt; + + auto Flags = parseStaticSamplerFlags(TokenKind::kw_flags); + if (!Flags.has_value()) + return std::nullopt; + Params.Flags = Flags; } else { consumeNextToken(); // let diagnostic be at the start of invalid token reportDiag(diag::err_hlsl_invalid_token) @@ -1255,6 +1272,50 @@ RootSignatureParser::parseDescriptorRangeFlags(TokenKind Context) { return Flags; } +std::optional<llvm::dxbc::StaticSamplerFlags> +RootSignatureParser::parseStaticSamplerFlags(TokenKind Context) { + assert(CurToken.TokKind == TokenKind::pu_equal && + "Expects to only be invoked starting at given keyword"); + + // Handle the edge-case of '0' to specify no flags set + if (tryConsumeExpectedToken(TokenKind::int_literal)) { 
+ if (!verifyZeroFlag()) { + reportDiag(diag::err_hlsl_rootsig_non_zero_flag); + return std::nullopt; + } + return llvm::dxbc::StaticSamplerFlags::None; + } + + TokenKind Expected[] = { +#define STATIC_SAMPLER_FLAG_ENUM(NAME, LIT) TokenKind::en_##NAME, +#include "clang/Lex/HLSLRootSignatureTokenKinds.def" + }; + + std::optional<llvm::dxbc::StaticSamplerFlags> Flags; + + do { + if (tryConsumeExpectedToken(Expected)) { + switch (CurToken.TokKind) { +#define STATIC_SAMPLER_FLAG_ENUM(NAME, LIT) \ + case TokenKind::en_##NAME: \ + Flags = maybeOrFlag<llvm::dxbc::StaticSamplerFlags>( \ + Flags, llvm::dxbc::StaticSamplerFlags::NAME); \ + break; +#include "clang/Lex/HLSLRootSignatureTokenKinds.def" + default: + llvm_unreachable("Switch for consumed enum token was not provided"); + } + } else { + consumeNextToken(); // consume token to point at invalid token + reportDiag(diag::err_hlsl_invalid_token) + << /*value=*/1 << /*value of*/ Context; + return std::nullopt; + } + } while (tryConsumeExpectedToken(TokenKind::pu_or)); + + return Flags; +} + std::optional<uint32_t> RootSignatureParser::handleUIntLiteral() { // Parse the numeric value and do semantic checks on its specification clang::NumericLiteralParser Literal( diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp index 39c3aa2..3cc61b1 100644 --- a/clang/lib/Sema/SemaChecking.cpp +++ b/clang/lib/Sema/SemaChecking.cpp @@ -5954,6 +5954,9 @@ bool Sema::BuiltinAssumeAligned(CallExpr *TheCall) { if (Result > Sema::MaximumAlignment) Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) << SecondArg->getSourceRange() << Sema::MaximumAlignment; + + TheCall->setArg(1, + ConstantExpr::Create(Context, SecondArg, APValue(Result))); } if (NumArgs > 2) { @@ -14881,13 +14884,11 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, // Diag message shows element size in bits and in "bytes" (platform- // dependent CharUnits) DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 
- PDiag(DiagID) - << toString(index, 10, true) << AddrBits - << (unsigned)ASTC.toBits(*ElemCharUnits) - << toString(ElemBytes, 10, false) - << toString(MaxElems, 10, false) - << (unsigned)MaxElems.getLimitedValue(~0U) - << IndexExpr->getSourceRange()); + PDiag(DiagID) << index << AddrBits + << (unsigned)ASTC.toBits(*ElemCharUnits) + << ElemBytes << MaxElems + << MaxElems.getZExtValue() + << IndexExpr->getSourceRange()); const NamedDecl *ND = nullptr; // Try harder to find a NamedDecl to point at in the note. @@ -14970,10 +14971,10 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, unsigned CastMsg = (!ASE || BaseType == EffectiveType) ? 0 : 1; QualType CastMsgTy = ASE ? ASE->getLHS()->getType() : QualType(); - DiagRuntimeBehavior( - BaseExpr->getBeginLoc(), BaseExpr, - PDiag(DiagID) << toString(index, 10, true) << ArrayTy->desugar() - << CastMsg << CastMsgTy << IndexExpr->getSourceRange()); + DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, + PDiag(DiagID) + << index << ArrayTy->desugar() << CastMsg + << CastMsgTy << IndexExpr->getSourceRange()); } else { unsigned DiagID = diag::warn_array_index_precedes_bounds; if (!ASE) { @@ -14982,8 +14983,7 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, } DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, - PDiag(DiagID) << toString(index, 10, true) - << IndexExpr->getSourceRange()); + PDiag(DiagID) << index << IndexExpr->getSourceRange()); } const NamedDecl *ND = nullptr; @@ -15946,7 +15946,7 @@ void Sema::RefersToMemberWithReducedAlignment( } // Check if the synthesized offset fulfills the alignment. - if (Offset % ExpectedAlignment != 0 || + if (!Offset.isMultipleOf(ExpectedAlignment) || // It may fulfill the offset it but the effective alignment may still be // lower than the expected expression alignment. 
CompleteObjectAlignment < ExpectedAlignment) { diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/RetainPtrCtorAdoptChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/RetainPtrCtorAdoptChecker.cpp index e1f9a77..955b8d1 100644 --- a/clang/lib/StaticAnalyzer/Checkers/WebKit/RetainPtrCtorAdoptChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/RetainPtrCtorAdoptChecker.cpp @@ -385,6 +385,10 @@ public: if (RTC.isUnretained(RetValue->getType())) return; } + if (retainsRet && *retainsRet) { + CreateOrCopyFnCall.insert(RetValue); + return; + } if (auto *CE = dyn_cast<CallExpr>(RetValue)) { auto *Callee = CE->getDirectCallee(); if (!Callee || !isCreateOrCopyFunction(Callee)) diff --git a/clang/lib/StaticAnalyzer/Core/Store.cpp b/clang/lib/StaticAnalyzer/Core/Store.cpp index 971e6bc..b609f36 100644 --- a/clang/lib/StaticAnalyzer/Core/Store.cpp +++ b/clang/lib/StaticAnalyzer/Core/Store.cpp @@ -210,7 +210,7 @@ std::optional<const MemRegion *> StoreManager::castRegion(const MemRegion *R, // Is the offset a multiple of the size? If so, we can layer the // ElementRegion (with elementType == PointeeTy) directly on top of // the base region. 
- if (off % pointeeTySize == 0) { + if (off.isMultipleOf(pointeeTySize)) { newIndex = off / pointeeTySize; newSuperR = baseR; } diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp index d370bfd..66cf2688 100644 --- a/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp +++ b/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp @@ -31,7 +31,7 @@ public: for (const auto &File : getDependencies()) { CanonPath = File; llvm::sys::path::remove_dots(CanonPath, /*remove_dot_dot=*/true); - llvm::sys::fs::make_absolute(WorkingDirectory, CanonPath); + llvm::sys::path::make_absolute(WorkingDirectory, CanonPath); C.handleFileDependency(CanonPath); } } diff --git a/clang/test/AST/ByteCode/const-eval.c b/clang/test/AST/ByteCode/const-eval.c index c6b51d1..d6cf600 100644 --- a/clang/test/AST/ByteCode/const-eval.c +++ b/clang/test/AST/ByteCode/const-eval.c @@ -144,7 +144,7 @@ EVAL_EXPR(52, &pr24622 == (void *)&PR24622); // We evaluate these by providing 2s' complement semantics in constant // expressions, like we do for integers. 
-void *PR28739a = (__int128)(unsigned long)-1 + &PR28739a; // both-warning {{the pointer incremented by 18446744073709551615 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2305843009213693952 elements)}} +void *PR28739a = (__int128)(unsigned long)-1 + &PR28739a; // both-warning {{the pointer incremented by 18'446'744'073'709'551'615 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2'305'843'009'213'693'952 elements)}} void *PR28739b = &PR28739b + (__int128)(unsigned long)-1; // both-warning {{refers past the last possible element}} __int128 PR28739c = (&PR28739c + (__int128)(unsigned long)-1) - &PR28739c; // both-warning {{refers past the last possible element}} diff --git a/clang/test/AST/HLSL/RootSignature-Target-AST.hlsl b/clang/test/AST/HLSL/RootSignature-Target-AST.hlsl index 91441e3..129ab70 100644 --- a/clang/test/AST/HLSL/RootSignature-Target-AST.hlsl +++ b/clang/test/AST/HLSL/RootSignature-Target-AST.hlsl @@ -1,9 +1,15 @@ // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-rootsignature -ast-dump \ +// RUN: -fdx-rootsignature-version=rootsig_1_0 \ +// RUN: -hlsl-entry EntryRootSig -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_0 + +// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-rootsignature -ast-dump \ +// RUN: -fdx-rootsignature-version=rootsig_1_1 \ // RUN: -hlsl-entry EntryRootSig -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_1 // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-rootsignature -ast-dump \ -// RUN: -fdx-rootsignature-version=rootsig_1_0 \ -// RUN: -hlsl-entry EntryRootSig -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_0 +// RUN: -fdx-rootsignature-version=rootsig_1_2 \ +// RUN: -hlsl-entry EntryRootSig -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_2 + // RUN: %clang_cc1 
-triple dxil-pc-shadermodel6.0-rootsignature -ast-dump \ // RUN: -D CmdRS='"UAV(u0)"'\ @@ -12,11 +18,13 @@ // CHECK: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[ENTRY_RS_DECL:__hlsl_rootsig_decl_\d*]] // CHECK-V1_0-SAME: version: 1.0, // CHECK-V1_1-SAME: version: 1.1, +// CHECK-V1_2-SAME: version: 1.2, // CHECK-SAME: RootElements{ // CHECK-SAME: RootCBV(b0, // CHECK-SAME: space = 0, visibility = All, // CHECK-V1_0-SAME: flags = DataVolatile // CHECK-V1_1-SAME: flags = DataStaticWhileSetAtExecute +// CHECK-V1_2-SAME: flags = DataStaticWhileSetAtExecute // CHECK-SAME: ) // CHECK-SAME: } #define EntryRootSig "CBV(b0)" diff --git a/clang/test/AST/HLSL/RootSignatures-AST.hlsl b/clang/test/AST/HLSL/RootSignatures-AST.hlsl index 32da1f1..0f0f3a5 100644 --- a/clang/test/AST/HLSL/RootSignatures-AST.hlsl +++ b/clang/test/AST/HLSL/RootSignatures-AST.hlsl @@ -6,6 +6,9 @@ // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -ast-dump \ // RUN: -fdx-rootsignature-version=rootsig_1_1 \ // RUN: -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_1 +// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -ast-dump \ +// RUN: -fdx-rootsignature-version=rootsig_1_2 \ +// RUN: -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_2 // This test ensures that the sample root signature is parsed without error and // the Attr AST Node is created succesfully. 
If an invalid root signature was @@ -31,6 +34,7 @@ // CHECK: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[SAMPLE_RS_DECL:__hlsl_rootsig_decl_\d*]] // CHECK-V1_0: version: 1.0, // CHECK-V1_1: version: 1.1, +// CHECK-V1_2: version: 1.2, // CHECK-SAME: RootElements{ // CHECK-SAME: RootFlags(AllowInputAssemblerInputLayout | DenyVertexShaderRootAccess), // CHECK-SAME: RootCBV(b0, @@ -62,6 +66,7 @@ // CHECK-SAME: s0, numDescriptors = 4, space = 1, offset = DescriptorTableOffsetAppend, // CHECK-V1_0-SAME: flags = DescriptorsVolatile // CHECK-V1_1-SAME: flags = None +// CHECK-V1_2-SAME: flags = None // CHECK-SAME: ), // CHECK-SAME: DescriptorTable( // CHECK-SAME: numClauses = 1, visibility = All @@ -73,6 +78,7 @@ // CHECK-SAME: s1, filter = Anisotropic, addressU = Wrap, addressV = Wrap, addressW = Wrap, // CHECK-SAME: mipLODBias = 0.000000e+00, maxAnisotropy = 16, comparisonFunc = LessEqual, // CHECK-SAME: borderColor = OpaqueWhite, minLOD = 0.000000e+00, maxLOD = 3.402823e+38, space = 0, visibility = All +// CHECK-SAME: flags = None // CHECK-SAME: )} // CHECK: -RootSignatureAttr 0x{{.*}} {{.*}} [[SAMPLE_RS_DECL]] @@ -131,3 +137,24 @@ void same_rs_string_main() {} // CHECK: -RootSignatureAttr 0x{{.*}} {{.*}} [[DIFF_RS_DECL]] [RootSignature(SampleDifferentRS)] void different_rs_string_main() {} + +#define SampleStaticSamplerRS \ + "StaticSampler(s0, flags = NON_NORMALIZED_COORDINATES)" + +// Ensure that static samplers flags are correctly parsed in different versions + +// CHECK: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[DIFF_RS_DECL:__hlsl_rootsig_decl_\d*]] +// CHECK-V1_0: version: 1.0, +// CHECK-V1_1: version: 1.1, +// CHECK-V1_2: version: 1.2, +// CHECK-SAME: RootElements{ +// CHECK-SAME: StaticSampler( +// CHECK-SAME: s0, filter = Anisotropic, addressU = Wrap, addressV = Wrap, addressW = Wrap, +// CHECK-SAME: mipLODBias = 0.000000e+00, maxAnisotropy = 16, comparisonFunc = LessEqual, +// CHECK-SAME: borderColor = OpaqueWhite, minLOD = 0.000000e+00, maxLOD = 
3.402823e+38, space = 0, visibility = All +// CHECK-SAME: flags = NonNormalizedCoordinates +// CHECK-SAME: )} + +// CHECK: -RootSignatureAttr 0x{{.*}} {{.*}} [[DIFF_RS_DECL]] +[RootSignature(SampleStaticSamplerRS)] +void statoc_sampler_v12_main() {} diff --git a/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h b/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h index 39dee17..dacb713 100644 --- a/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h +++ b/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h @@ -17,6 +17,20 @@ template<typename T> typename remove_reference<T>::type&& move(T&& t); #endif +namespace std { + +template <bool, typename U = void> struct enable_if { +}; + +template <typename T> struct enable_if<true, T> { + using type = T; +}; + +template <bool value, class T = void> +using enable_if_t = typename enable_if<value, T>::type; + +} + @class NSString; @class NSArray; @class NSMutableArray; @@ -100,6 +114,7 @@ id CFBridgingRelease(CFTypeRef X) { __attribute__((objc_root_class)) @interface NSObject + (instancetype) alloc; ++ (instancetype) allocWithZone:(NSZone *)zone; + (Class) class; + (Class) superclass; - (instancetype) init; @@ -232,6 +247,14 @@ template <typename T> struct RemovePointer<T*> { typedef T Type; }; +template <typename T> struct IsPointer { + static constexpr bool value = false; +}; + +template <typename T> struct IsPointer<T*> { + static constexpr bool value = true; +}; + template <typename T> struct RetainPtr { using ValueType = typename RemovePointer<T>::Type; using PtrType = ValueType*; @@ -285,12 +308,23 @@ template <typename T> struct RetainPtr { PtrType operator->() const { return t; } T &operator*() const { return *t; } RetainPtr &operator=(PtrType t); - PtrType leakRef() + + template <typename U = PtrType> + std::enable_if_t<IsPointer<U>::value, U> leakRef() CF_RETURNS_RETAINED + { + PtrType s = t; + t = nullptr; + return s; + } + + template <typename U = PtrType> + std::enable_if_t<!IsPointer<U>::value, U> 
leakRef() NS_RETURNS_RETAINED { PtrType s = t; t = nullptr; return s; } + operator PtrType() const { return t; } operator bool() const { return t; } diff --git a/clang/test/Analysis/Checkers/WebKit/retain-ptr-ctor-adopt-use.mm b/clang/test/Analysis/Checkers/WebKit/retain-ptr-ctor-adopt-use.mm index 7699017..4570561 100644 --- a/clang/test/Analysis/Checkers/WebKit/retain-ptr-ctor-adopt-use.mm +++ b/clang/test/Analysis/Checkers/WebKit/retain-ptr-ctor-adopt-use.mm @@ -104,6 +104,14 @@ void basic_correct_arc() { _number = value; } +- (id)copyWithZone:(NSZone *)zone { + auto copy = adoptNS([(SomeObj *)[SomeObj allocWithZone:zone] init]); + [copy setValue:_number]; + [copy setNext:_next]; + [copy setOther:_other]; + return copy.leakRef(); +} + @end; RetainPtr<CVPixelBufferRef> cf_out_argument() { @@ -151,7 +159,7 @@ NSArray *makeArray() NS_RETURNS_RETAINED { extern Class (*getNSArrayClass)(); NSArray *allocArrayInstance() NS_RETURNS_RETAINED { - return [[getNSArrayClass() alloc] init]; + return adoptNS([[getNSArrayClass() alloc] init]).leakRef(); } extern int (*GetObj)(CF_RETURNS_RETAINED CFTypeRef* objOut); @@ -294,7 +302,7 @@ RetainPtr<CFArrayRef> adopt_make_array() { } -(NSString *)make_string { - return [[NSString alloc] initWithUTF8String:"hello"]; + return adoptNS([[NSString alloc] initWithUTF8String:"hello"]).leakRef(); } -(void)local_leak_string { diff --git a/clang/test/CIR/CodeGen/complex.cpp b/clang/test/CIR/CodeGen/complex.cpp index 2d58c38..ae69b24 100644 --- a/clang/test/CIR/CodeGen/complex.cpp +++ b/clang/test/CIR/CodeGen/complex.cpp @@ -1140,7 +1140,8 @@ void real_on_scalar_glvalue() { // CIR: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a"] // CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b", init] // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.float>, !cir.float -// CIR: cir.store{{.*}} %[[TMP_A]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float> +// CIR: %[[A_REAL:.*]] = cir.complex.real 
%[[TMP_A]] : !cir.float -> !cir.float +// CIR: cir.store{{.*}} %[[A_REAL]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float> // LLVM: %[[A_ADDR:.*]] = alloca float, i64 1, align 4 // LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4 @@ -1179,7 +1180,8 @@ void real_on_scalar_with_type_promotion() { // CIR: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init] // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.f16>, !cir.f16 // CIR: %[[TMP_A_F32:.*]] = cir.cast floating %[[TMP_A]] : !cir.f16 -> !cir.float -// CIR: %[[TMP_A_F16:.*]] = cir.cast floating %[[TMP_A_F32]] : !cir.float -> !cir.f16 +// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A_F32]] : !cir.float -> !cir.float +// CIR: %[[TMP_A_F16:.*]] = cir.cast floating %[[A_REAL]] : !cir.float -> !cir.f16 // CIR: cir.store{{.*}} %[[TMP_A_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16> // LLVM: %[[A_ADDR:.*]] = alloca half, i64 1, align 2 @@ -1248,7 +1250,8 @@ void real_on_scalar_from_real_with_type_promotion() { // CIR: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float // CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float> // CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float -// CIR: %[[A_REAL_F16:.*]] = cir.cast floating %[[A_REAL_F32]] : !cir.float -> !cir.f16 +// CIR: %[[A_REAL:.*]] = cir.complex.real %[[A_REAL_F32]] : !cir.float -> !cir.float +// CIR: %[[A_REAL_F16:.*]] = cir.cast floating %[[A_REAL]] : !cir.float -> !cir.f16 // CIR: cir.store{{.*}} %[[A_REAL_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16> // LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2 @@ -1285,8 +1288,9 @@ void real_on_scalar_from_imag_with_type_promotion() { // CIR: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float // CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> 
!cir.complex<!cir.float> // CIR: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float -// CIR: %[[A_IMAG_F16:.*]] = cir.cast floating %[[A_IMAG_F32]] : !cir.float -> !cir.f16 -// CIR: cir.store{{.*}} %[[A_IMAG_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16> +// CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_IMAG_F32]] : !cir.float -> !cir.float +// CIR: %[[A_REAL_F16:.*]] = cir.cast floating %[[A_REAL_F32]] : !cir.float -> !cir.f16 +// CIR: cir.store{{.*}} %[[A_REAL_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16> // LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2 // LLVM: %[[B_ADDR]] = alloca half, i64 1, align 2 diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 1dc16f3..96db82a 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -154,3 +154,32 @@ void choose_expr() { // OGCG: %[[B_ADDR:.*]] = alloca %struct.CompleteS, align 4 // OGCG: %[[C_ADDR:.*]] = alloca %struct.CompleteS, align 4 // OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[C_ADDR]], ptr align 4 %[[A_ADDR]], i64 8, i1 false) + +void generic_selection() { + CompleteS a; + CompleteS b; + int c; + CompleteS d = _Generic(c, int : a, default: b); +} + +// CIR: cir.func{{.*}} @_Z17generic_selectionv() +// CIR: %[[A_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr<!rec_CompleteS>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr<!rec_CompleteS>, ["b"] +// CIR: %[[C_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["c"] +// CIR: %[[D_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr<!rec_CompleteS>, ["d", init] +// TODO(cir): Call to default copy constructor should be replaced by `cir.copy` op +// CIR: cir.call @_ZN9CompleteSC1ERKS_(%[[D_ADDR]], %[[A_ADDR]]) nothrow : (!cir.ptr<!rec_CompleteS>, !cir.ptr<!rec_CompleteS>) -> () + +// LLVM: define{{.*}} void @_Z17generic_selectionv() +// LLVM: %1 = alloca %struct.CompleteS, i64 1, align 4 +// LLVM: %2 = 
alloca %struct.CompleteS, i64 1, align 4 +// LLVM: %3 = alloca i32, i64 1, align 4 +// LLVM: %4 = alloca %struct.CompleteS, i64 1, align 4 +// LLVM: call void @_ZN9CompleteSC1ERKS_(ptr %4, ptr %1) + +// OGCG: define{{.*}} void @_Z17generic_selectionv() +// OGCG: %[[A_ADDR:.*]] = alloca %struct.CompleteS, align 4 +// OGCG: %[[B_ADDR:.*]] = alloca %struct.CompleteS, align 4 +// OGCG: %[[C_ADDR:.*]] = alloca i32, align 4 +// OGCG: %[[D_ADDR:.*]] = alloca %struct.CompleteS, align 4 +// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[D_ADDR]], ptr align 4 %[[A_ADDR]], i64 8, i1 false) diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp index e17ef90..4d0e481 100644 --- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp @@ -20,6 +20,30 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : 
!cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } destroy { @@ -77,12 +101,59 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) 
: !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i 
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } destroy { @@ -165,6 +236,30 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) 
: !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i @@ -172,12 +267,60 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { 
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA3:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : 
!u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA3]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } destroy { @@ -279,12 +422,59 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } 
+// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// 
CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -311,12 +501,40 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> -> !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : 
!cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } destroy { @@ -399,6 +617,30 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!rec_CtorDtor x 5>, !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, 
!u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } destroy { @@ -478,6 +720,10 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i // +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!rec_CtorDtor>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i @@ -485,12 +731,58 @@ void do_things(unsigned A, 
unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS2:.*]] = 
cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i // CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } destroy { @@ -589,12 +881,39 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!rec_CtorDtor>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, 
%[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -622,6 +941,31 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// 
CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i @@ -629,6 +973,29 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array<!rec_CtorDtor x 5>, !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = 
cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } destroy { @@ -729,12 +1096,59 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array<!rec_CtorDtor x 5>, !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// 
CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } destroy { @@ -838,9 +1252,37 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!cir.ptr<!rec_CtorDtor> x 5>, !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, 
!cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i +// +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> -> !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> // // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i @@ -848,6 +1290,29 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// 
CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } destroy { @@ -948,6 +1413,30 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<40> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!cir.ptr<!rec_CtorDtor> x 5>, !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp index 4398216..4687320 100644 --- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp @@ -15,6 +15,30 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.store 
%[[SRC]], %[[DEST]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -41,12 +65,59 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> 
+// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>> +// 
CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -75,6 +146,30 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> 
+// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i @@ -82,12 +177,58 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : 
!cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA3:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA3]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.store 
%[[SRC]], %[[DEST]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -109,12 +250,59 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : 
!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : 
!u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -141,12 +329,39 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>> -> !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store 
%[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -176,6 +391,30 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!rec_NoOps x 5>, !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// 
CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -200,6 +439,10 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i // +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!rec_NoOps>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i @@ -207,12 +450,58 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, 
%[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i // CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, 
!u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -231,12 +520,39 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!rec_NoOps>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: 
%[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -265,12 +581,59 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.array<!rec_NoOps x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// 
CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i // CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<20> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array<!rec_NoOps x 5>, !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, 
%[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -292,12 +655,59 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.array<!rec_NoOps x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : 
!cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array<!rec_NoOps x 5>, !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, 
!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -328,16 +738,67 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!cir.ptr<!rec_NoOps> x 5>, !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : 
!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i // +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>> -> !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i // CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] 
= cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -358,6 +819,30 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<40> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!cir.ptr<!rec_NoOps> x 5>, !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], 
%[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp index 79692d3..db5d578 100644 --- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s template<typename T> void do_things(unsigned A, unsigned B) { @@ -13,6 +13,31 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = 
cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -39,12 +64,59 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), 
!cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : 
!u64i), !cir.ptr<!s32i> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -73,6 +145,30 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : 
!cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i @@ -80,12 +176,57 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), 
!cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA3:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA3]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : 
!u64i), !cir.ptr<!s32i> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -107,12 +248,59 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : 
!cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), 
!cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -138,12 +326,39 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>> -> !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : 
i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -172,6 +387,30 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!s32i x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!s32i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -197,6 +436,10 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i // +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!s32i>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i @@ -204,12 +447,58 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = 
cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i // CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// 
CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -228,12 +517,39 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!s32i>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// 
CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -262,12 +578,60 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.array<!s32i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// // 
CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i // CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<20> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.array<!s32i x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!s32i x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!s32i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, 
!cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT:} @@ -289,12 +653,59 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.array<!s32i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], 
%[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.array<!s32i x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!s32i x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!s32i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: 
%[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -325,16 +736,67 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!cir.ptr<!s32i> x 5>, !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>, !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i // +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>> -> !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// // CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i // CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : 
!cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[STRIDE]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -356,6 +818,30 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<40> : !u64i // CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i // CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!cir.ptr<!s32i> x 5>, !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, 
%[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>, !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp index 77ff357..65b0365 100644 --- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp @@ -29,6 +29,34 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -44,12 +72,64 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], 
%[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i // CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR]]) : !u64i // CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : 
!cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -71,6 +151,32 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>, %[[ITR_LOAD]] : !u64i), 
!cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i @@ -78,12 +184,65 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_PTR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// +// Copy array pointer to the original alloca. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UPPER_BOUND_CAST_3:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS_2:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[NUM_ELTS]]) : !u64i // CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = 
cir.binop(mul, %[[NUM_ELTS_2]], %[[SIZEOF_INT]]) : !u64i // CHECK-NEXT: %[[INT_PTR_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_ALLOCA]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } destroy { @@ -193,6 +352,33 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) 
: !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -208,12 +394,64 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: 
%[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i // CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_INT]]) : !u64i // CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] 
: !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } destroy { @@ -299,6 +537,34 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[SIZEOF_CTORDTOR:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_CTORDTOR]]) : !u64i // CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), 
!cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } destroy { diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp index 4822dd7..07e06f8 100644 --- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp @@ -23,6 +23,33 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -38,12 +65,65 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, 
!cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// // CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i // CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR]]) : !u64i // CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, 
!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -63,6 +143,33 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> +// 
CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// // CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i @@ -70,12 +177,66 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_PTR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// Copy array pointer to the original alloca. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// // CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UPPER_BOUND_CAST_3:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS_2:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[NUM_ELTS]]) : !u64i // CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, 
%[[NUM_ELTS_2]], %[[SIZEOF_INT]]) : !u64i // CHECK-NEXT: %[[INT_PTR_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_ALLOCA]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -106,6 +267,33 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: 
cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -121,12 +309,64 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i // CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_INT]]) : !u64i // CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: 
cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -155,6 +395,34 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[SIZEOF_NOOPS:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_NOOPS]]) : !u64i // CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!rec_NoOps>, 
!cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp index ddf25de..a3b7dca 100644 --- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s template<typename T> void do_things(unsigned A, unsigned B) { @@ -21,6 +21,33 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -36,12 +63,65 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, 
!cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } + // CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i // CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR]]) : !u64i // CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: 
%[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -61,6 +141,33 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>> +// 
CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// // CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i @@ -68,12 +175,64 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_PTR_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// Copy array pointer to the original alloca. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UPPER_BOUND_CAST_3:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS_2:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[NUM_ELTS]]) : !u64i // CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS_2]], %[[SIZEOF_INT]]) : 
!u64i // CHECK-NEXT: %[[INT_PTR_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_ALLOCA]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // TODO: Add Init here. 
// CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -104,6 +263,34 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, 
!cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -119,6 +306,32 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i // CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} // +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// 
CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index // CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i // CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i @@ -126,6 +339,32 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_INT]]) : !u64i // CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} // +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// 
CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -154,6 +393,33 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i // CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT]]) : !u64i // CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, 
!u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // TODO: Add Init here. // CHECK-NEXT: acc.yield // CHECK-NEXT: } diff --git a/clang/test/CodeGenHLSL/RootSignature.hlsl b/clang/test/CodeGenHLSL/RootSignature.hlsl index bc40bdd..eaff3a9 100644 --- a/clang/test/CodeGenHLSL/RootSignature.hlsl +++ b/clang/test/CodeGenHLSL/RootSignature.hlsl @@ -82,8 +82,8 @@ void RootDescriptorsEntry() {} // checking minLOD, maxLOD // CHECK-SAME: float -1.280000e+02, float 1.280000e+02, -// checking register, space and visibility -// CHECK-SAME: i32 42, i32 0, i32 0} +// checking register, space, visibility and flag +// CHECK-SAME: i32 42, i32 0, i32 0, i32 1} #define SampleStaticSampler \ "StaticSampler(s42, " \ @@ -96,6 +96,7 @@ void RootDescriptorsEntry() {} " borderColor = STATIC_BORDER_COLOR_OPAQUE_WHITE, " \ " minLOD = -128.f, maxLOD = 128.f, " \ " space = 0, visibility = SHADER_VISIBILITY_ALL, " \ + " flags = UINT_BORDER_COLOR" \ ")" [shader("compute"), RootSignature(SampleStaticSampler)] [numthreads(1,1,1)] diff --git a/clang/test/OpenMP/amdgcn_save_temps.c b/clang/test/OpenMP/amdgcn_save_temps.c new file mode 100644 index 0000000..d838bb1 --- /dev/null +++ b/clang/test/OpenMP/amdgcn_save_temps.c @@ -0,0 +1,23 @@ + +// REQUIRES: amdgpu-registered-target + +// RUN: %clang_cc1 -E -fopenmp -x c -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -save-temps=cwd %s -o %t-openmp-amdgcn-amd-amdhsa-gfx90a.i +// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -save-temps=cwd -emit-llvm-bc %s -o %t-x86_64-unknown-unknown.bc +// RUN: %clang_cc1 -fopenmp -x c -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -save-temps=cwd -emit-llvm -fopenmp-is-target-device -x cpp-output %t-openmp-amdgcn-amd-amdhsa-gfx90a.i -fopenmp-host-ir-file-path 
%t-x86_64-unknown-unknown.bc -o - | FileCheck %s +// expected-no-diagnostics +#ifndef HEADER +#define HEADER + +#define N 1000 + +int test_amdgcn_save_temps() { + int arr[N]; +#pragma omp target + for (int i = 0; i < N; i++) { + arr[i] = 1; + } + return arr[0]; +} +#endif + +// CHECK: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_test_amdgcn_save_temps diff --git a/clang/test/Sema/const-eval.c b/clang/test/Sema/const-eval.c index 11cc7fb..53face9 100644 --- a/clang/test/Sema/const-eval.c +++ b/clang/test/Sema/const-eval.c @@ -138,7 +138,7 @@ EVAL_EXPR(52, &pr24622 == (void *)&PR24622); // We evaluate these by providing 2s' complement semantics in constant // expressions, like we do for integers. -void *PR28739a = (__int128)(unsigned long)-1 + &PR28739a; // expected-warning {{the pointer incremented by 18446744073709551615 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2305843009213693952 elements)}} +void *PR28739a = (__int128)(unsigned long)-1 + &PR28739a; // expected-warning {{the pointer incremented by 18'446'744'073'709'551'615 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2'305'843'009'213'693'952 elements)}} void *PR28739b = &PR28739b + (__int128)(unsigned long)-1; // expected-warning {{refers past the last possible element}} __int128 PR28739c = (&PR28739c + (__int128)(unsigned long)-1) - &PR28739c; // expected-warning {{refers past the last possible element}} void *PR28739d = &(&PR28739d)[(__int128)(unsigned long)-1]; // expected-warning {{refers past the last possible element}} diff --git a/clang/test/Sema/integer-overflow.c b/clang/test/Sema/integer-overflow.c index 30a47aa..ba943f0 100644 --- a/clang/test/Sema/integer-overflow.c +++ b/clang/test/Sema/integer-overflow.c @@ -143,7 +143,7 @@ uint64_t check_integer_overflows(int i) { (__imag__ x) = 4608 * 1024 * 1024; // expected-warning@+4 
{{overflow in expression; result is 536'870'912 with type 'int'}} -// expected-warning@+3 {{array index 536870912 is past the end of the array (that has type 'uint64_t[10]' (aka 'unsigned long long[10]'))}} +// expected-warning@+3 {{array index 536'870'912 is past the end of the array (that has type 'uint64_t[10]' (aka 'unsigned long long[10]'))}} // expected-note@+1 {{array 'a' declared here}} uint64_t a[10]; a[4608 * 1024 * 1024] = 1i; diff --git a/clang/test/Sema/unbounded-array-bounds.c b/clang/test/Sema/unbounded-array-bounds.c index b22261a..909286b 100644 --- a/clang/test/Sema/unbounded-array-bounds.c +++ b/clang/test/Sema/unbounded-array-bounds.c @@ -14,11 +14,11 @@ struct S s[]; // expected-warning {{tentative array definition}} expected-note { void f1(void) { ++s[3].a; ++s[7073650413200313099].b; - // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}} - // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178956970 elements)}} - // addr64-warning@-3 {{array index 7073650413200313099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576460752303423488 elements)}} + // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}} + // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178'956'970 elements)}} + // addr64-warning@-3 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 64-bit address space 
containing 256-bit (32-byte) elements (max possible 576'460'752'303'423'488 elements)}} ++s[7073650].c; - // addr16-warning@-1 {{array index 7073650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}} + // addr16-warning@-1 {{array index 7'073'650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}} } long long ll[]; // expected-warning {{tentative array definition}} expected-note {{declared here}} addr16-note {{declared here}} addr32-note {{declared here}} @@ -26,32 +26,32 @@ long long ll[]; // expected-warning {{tentative array definition}} expected-note void f2(void) { ++ll[3]; ++ll[2705843009213693952]; - // addr16-warning@-1 {{array index 2705843009213693952 refers past the last possible element for an array in 16-bit address space containing 64-bit (8-byte) elements (max possible 8192 elements)}} - // addr32-warning@-2 {{array index 2705843009213693952 refers past the last possible element for an array in 32-bit address space containing 64-bit (8-byte) elements (max possible 536870912 elements)}} - // addr64-warning@-3 {{array index 2705843009213693952 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2305843009213693952 elements)}} + // addr16-warning@-1 {{array index 2'705'843'009'213'693'952 refers past the last possible element for an array in 16-bit address space containing 64-bit (8-byte) elements (max possible 8'192 elements)}} + // addr32-warning@-2 {{array index 2'705'843'009'213'693'952 refers past the last possible element for an array in 32-bit address space containing 64-bit (8-byte) elements (max possible 536'870'912 elements)}} + // addr64-warning@-3 {{array index 2'705'843'009'213'693'952 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) 
elements (max possible 2'305'843'009'213'693'952 elements)}} ++ll[847073650]; - // addr16-warning@-1 {{array index 847073650 refers past the last possible element for an array in 16-bit address space containing 64-bit (8-byte) elements (max possible 8192 elements)}} - // addr32-warning@-2 {{array index 847073650 refers past the last possible element for an array in 32-bit address space containing 64-bit (8-byte) elements (max possible 536870912 elements)}} + // addr16-warning@-1 {{array index 847'073'650 refers past the last possible element for an array in 16-bit address space containing 64-bit (8-byte) elements (max possible 8'192 elements)}} + // addr32-warning@-2 {{array index 847'073'650 refers past the last possible element for an array in 32-bit address space containing 64-bit (8-byte) elements (max possible 536'870'912 elements)}} } void f3(struct S p[]) { // expected-note {{declared here}} addr16-note {{declared here}} ++p[3].a; ++p[7073650413200313099].b; - // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}} - // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178956970 elements)}} - // addr64-warning@-3 {{array index 7073650413200313099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576460752303423488 elements)}} + // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}} + // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 
178'956'970 elements)}} + // addr64-warning@-3 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576'460'752'303'423'488 elements)}} ++p[7073650].c; - // addr16-warning@-1 {{array index 7073650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}} + // addr16-warning@-1 {{array index 7'073'650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}} } void f4(struct S *p) { // expected-note {{declared here}} addr16-note {{declared here}} p += 3; p += 7073650413200313099; - // addr16-warning@-1 {{the pointer incremented by 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}} - // addr32-warning@-2 {{the pointer incremented by 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178956970 elements)}} - // addr64-warning@-3 {{the pointer incremented by 7073650413200313099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576460752303423488 elements)}} + // addr16-warning@-1 {{the pointer incremented by 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}} + // addr32-warning@-2 {{the pointer incremented by 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178'956'970 elements)}} + // addr64-warning@-3 {{the pointer incremented by 7'073'650'413'200'313'099 refers past the last possible 
element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576'460'752'303'423'488 elements)}} p += 7073650; - // addr16-warning@-1 {{the pointer incremented by 7073650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}} + // addr16-warning@-1 {{the pointer incremented by 7'073'650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}} } struct BQ { @@ -63,7 +63,7 @@ struct BQ bq[]; // expected-warning {{tentative array definition}} addr16-note { void f5(void) { ++bq[0].bigblock[0].a; ++bq[1].bigblock[0].a; - // addr16-warning@-1 {{array index 1 refers past the last possible element for an array in 16-bit address space containing 497952-bit (62244-byte) elements (max possible 1 element)}} + // addr16-warning@-1 {{array index 1 refers past the last possible element for an array in 16-bit address space containing 497952-bit (62'244-byte) elements (max possible 1 element)}} } void f6(void) { @@ -102,15 +102,15 @@ struct { void fam_ily() { ++fam.tail[7073650413200313099]; - // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65536 elements)}} - // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4294967296 elements)}} + // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65'536 elements)}} + // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 
4'294'967'296 elements)}} // No warning for addr64 because the array index is inbound in that case. ++fam0.tail[7073650413200313099]; - // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65536 elements)}} - // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4294967296 elements)}} + // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65'536 elements)}} + // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4'294'967'296 elements)}} // No warning for addr64 because the array index is inbound in that case. ++fam1.tail[7073650413200313099]; - // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65536 elements)}} - // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4294967296 elements)}} + // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65'536 elements)}} + // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4'294'967'296 elements)}} // No warning for addr64 because the array index is inbound in that case. 
} diff --git a/clang/test/SemaCXX/array-bounds.cpp b/clang/test/SemaCXX/array-bounds.cpp index b584e1e..6a40d1d 100644 --- a/clang/test/SemaCXX/array-bounds.cpp +++ b/clang/test/SemaCXX/array-bounds.cpp @@ -237,7 +237,7 @@ void test_pr10771() { ((char*)foo)[sizeof(foo) - 1] = '\0'; // no-warning *(((char*)foo) + sizeof(foo) - 1) = '\0'; // no-warning - ((char*)foo)[sizeof(foo)] = '\0'; // expected-warning {{array index 32768 is past the end of the array (that has type 'double[4096]', cast to 'char *')}} + ((char*)foo)[sizeof(foo)] = '\0'; // expected-warning {{array index 32'768 is past the end of the array (that has type 'double[4096]', cast to 'char *')}} // TODO: This should probably warn, too. *(((char*)foo) + sizeof(foo)) = '\0'; // no-warning @@ -248,7 +248,7 @@ int test_pr11007_aux(const char * restrict, ...); // Test checking with varargs. void test_pr11007() { double a[5]; // expected-note {{array 'a' declared here}} - test_pr11007_aux("foo", a[1000]); // expected-warning {{array index 1000 is past the end of the array (that has type 'double[5]')}} + test_pr11007_aux("foo", a[1000]); // expected-warning {{array index 1'000 is past the end of the array (that has type 'double[5]')}} } void test_rdar10916006(void) diff --git a/clang/test/SemaCXX/builtin-assume-aligned.cpp b/clang/test/SemaCXX/builtin-assume-aligned.cpp index 48bd841..afc11cc 100644 --- a/clang/test/SemaCXX/builtin-assume-aligned.cpp +++ b/clang/test/SemaCXX/builtin-assume-aligned.cpp @@ -47,3 +47,9 @@ constexpr void *s1 = __builtin_assume_aligned(x, 32); constexpr void *s2 = __builtin_assume_aligned(x, 32, 5); constexpr void *s3 = __builtin_assume_aligned(x, 32, -1); + +constexpr int add(int a, int b) { + return a+b; +} +constexpr void *c1 = __builtin_assume_aligned(p, add(1,1)); +constexpr void *c2 = __builtin_assume_aligned(p, add(2,1)); // expected-error {{not a power of 2}} diff --git a/clang/test/SemaCXX/constant-expression-cxx14.cpp b/clang/test/SemaCXX/constant-expression-cxx14.cpp 
index 1743e0e..bea90ff 100644 --- a/clang/test/SemaCXX/constant-expression-cxx14.cpp +++ b/clang/test/SemaCXX/constant-expression-cxx14.cpp @@ -1047,7 +1047,7 @@ constexpr int S = sum(Cs); // expected-error{{must be initialized by a constant constexpr void PR28739(int n) { // cxx14_20-error {{never produces a constant}} int *p = &n; // expected-note {{array 'p' declared here}} p += (__int128)(unsigned long)-1; // cxx14_20-note {{cannot refer to element 18446744073709551615 of non-array object in a constant expression}} - // expected-warning@-1 {{the pointer incremented by 18446744073709551615 refers past the last possible element for an array in 64-bit address space containing 32-bit (4-byte) elements (max possible 4611686018427387904 elements)}} + // expected-warning@-1 {{the pointer incremented by 18'446'744'073'709'551'615 refers past the last possible element for an array in 64-bit address space containing 32-bit (4-byte) elements (max possible 4'611'686'018'427'387'904 elements)}} } constexpr void Void(int n) { diff --git a/clang/test/SemaCXX/integer-overflow.cpp b/clang/test/SemaCXX/integer-overflow.cpp index 73a4e88..214dc11 100644 --- a/clang/test/SemaCXX/integer-overflow.cpp +++ b/clang/test/SemaCXX/integer-overflow.cpp @@ -171,7 +171,7 @@ uint64_t check_integer_overflows(int i) { //expected-note 0+{{declared here}} uint64_t a[10]; a[4608 * 1024 * 1024] = 1; #if __cplusplus < 201103L -// expected-warning@-2 {{array index 536870912 is past the end of the array (that has type 'uint64_t[10]' (aka 'unsigned long long[10]'))}} +// expected-warning@-2 {{array index 536'870'912 is past the end of the array (that has type 'uint64_t[10]' (aka 'unsigned long long[10]'))}} // expected-note@-4 {{array 'a' declared here}} #endif diff --git a/clang/test/SemaHLSL/RootSignature-err.hlsl b/clang/test/SemaHLSL/RootSignature-err.hlsl index 89c684c..debeafe 100644 --- a/clang/test/SemaHLSL/RootSignature-err.hlsl +++ b/clang/test/SemaHLSL/RootSignature-err.hlsl @@ -191,6 
+191,10 @@ void basic_validation_5() {} [RootSignature("StaticSampler(s0, mipLODBias = 15.990001)")] void basic_validation_6() {} +// expected-error@+1 {{invalid value of flags}} +[RootSignature("StaticSampler(s0, flags = FLAG_TYPO)")] +void basic_validation_7() {} + // expected-error@+1 {{sampler and non-sampler resource mixed in descriptor table}} [RootSignature("DescriptorTable(Sampler(s0), CBV(b0))")] void mixed_resource_table() {} diff --git a/clang/test/SemaHLSL/RootSignature-flags-err.hlsl b/clang/test/SemaHLSL/RootSignature-flags-err.hlsl index 9449d33..c79e692 100644 --- a/clang/test/SemaHLSL/RootSignature-flags-err.hlsl +++ b/clang/test/SemaHLSL/RootSignature-flags-err.hlsl @@ -2,7 +2,8 @@ // RUN: -fdx-rootsignature-version=rootsig_1_0 %s -verify=v10 // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -x hlsl -fsyntax-only \ // RUN: -fdx-rootsignature-version=rootsig_1_1 %s -verify=v11 - +// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -x hlsl -fsyntax-only \ +// RUN: -fdx-rootsignature-version=rootsig_1_2 %s -verify=v12 // Root Descriptor Flags: // v10-error@+1 {{invalid flags for version 1.0}} @@ -13,8 +14,9 @@ void bad_root_descriptor_flags_0() {} [RootSignature("CBV(b0, flags = DATA_STATIC_WHILE_SET_AT_EXECUTE)")] void bad_root_descriptor_flags_1() {} -// v10-error@+2 {{invalid flags for version 1.0}} -// v11-error@+1 {{invalid flags for version 1.1}} +// v10-error@+3 {{invalid flags for version 1.0}} +// v11-error@+2 {{invalid flags for version 1.1}} +// v12-error@+1 {{invalid flags for version 1.2}} [RootSignature("CBV(b0, flags = DATA_STATIC | DATA_VOLATILE)")] void bad_root_descriptor_flags_2() {} @@ -40,18 +42,20 @@ void bad_descriptor_range_flags_3() {} [RootSignature("DescriptorTable(CBV(b0, flags = DESCRIPTORS_STATIC_KEEPING_BUFFER_BOUNDS_CHECKS))")] void bad_descriptor_range_flags_4() {} -// v10-error@+2 {{invalid flags for version 1.0}} -// v11-error@+1 {{invalid flags for version 1.1}} +// v10-error@+3 {{invalid flags for 
version 1.0}} +// v11-error@+2 {{invalid flags for version 1.1}} +// v12-error@+1 {{invalid flags for version 1.2}} [RootSignature("DescriptorTable(CBV(b0, flags = DATA_STATIC | DATA_STATIC_WHILE_SET_AT_EXECUTE))")] void bad_descriptor_range_flags_5() {} -// v10-error@+2 {{invalid flags for version 1.0}} -// v11-error@+1 {{invalid flags for version 1.1}} +// v10-error@+3 {{invalid flags for version 1.0}} +// v11-error@+2 {{invalid flags for version 1.1}} +// v12-error@+1 {{invalid flags for version 1.2}} [RootSignature("DescriptorTable(CBV(b0, flags = DESCRIPTORS_VOLATILE | DESCRIPTORS_STATIC_KEEPING_BUFFER_BOUNDS_CHECKS))")] void bad_descriptor_range_flags_6() {} -// v10-error@+2 {{invalid flags for version 1.0}} -// v11-error@+1 {{invalid flags for version 1.1}} +// v10-error@+3 {{invalid flags for version 1.0}} +// v11-error@+2 {{invalid flags for version 1.1}} +// v12-error@+1 {{invalid flags for version 1.2}} [RootSignature("DescriptorTable(CBV(b0, flags = DESCRIPTORS_VOLATILE | DATA_STATIC))")] void bad_descriptor_range_flags_7() {} - diff --git a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp index be658aca..1419b8c 100644 --- a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp +++ b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp @@ -608,10 +608,10 @@ Expected<StringRef> linkDevice(ArrayRef<StringRef> InputFiles, Error containerizeRawImage(std::unique_ptr<MemoryBuffer> &Img, OffloadKind Kind, const ArgList &Args) { llvm::Triple Triple(Args.getLastArgValue(OPT_triple_EQ)); - if (Kind != OFK_OpenMP || !Triple.isSPIRV() || - Triple.getVendor() != llvm::Triple::Intel) - return Error::success(); - return offloading::intel::containerizeOpenMPSPIRVImage(Img); + if (Kind == OFK_OpenMP && Triple.isSPIRV() && + Triple.getVendor() == llvm::Triple::Intel) + return offloading::intel::containerizeOpenMPSPIRVImage(Img); + return Error::success(); } Expected<StringRef> writeOffloadFile(const 
OffloadFile &File) { diff --git a/clang/tools/clang-sycl-linker/ClangSYCLLinker.cpp b/clang/tools/clang-sycl-linker/ClangSYCLLinker.cpp index 8dd993f..594c79a 100644 --- a/clang/tools/clang-sycl-linker/ClangSYCLLinker.cpp +++ b/clang/tools/clang-sycl-linker/ClangSYCLLinker.cpp @@ -27,22 +27,16 @@ #include "llvm/LTO/LTO.h" #include "llvm/Linker/Linker.h" #include "llvm/MC/TargetRegistry.h" -#include "llvm/Object/Archive.h" -#include "llvm/Object/ArchiveWriter.h" #include "llvm/Object/Binary.h" -#include "llvm/Object/ELFObjectFile.h" #include "llvm/Object/IRObjectFile.h" -#include "llvm/Object/ObjectFile.h" #include "llvm/Object/OffloadBinary.h" #include "llvm/Option/ArgList.h" #include "llvm/Option/OptTable.h" #include "llvm/Option/Option.h" -#include "llvm/Remarks/HotnessThresholdParser.h" #include "llvm/Support/CommandLine.h" -#include "llvm/Support/FileOutputBuffer.h" #include "llvm/Support/FileSystem.h" +#include "llvm/Support/FormatVariadic.h" #include "llvm/Support/InitLLVM.h" -#include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Path.h" #include "llvm/Support/Program.h" #include "llvm/Support/Signals.h" diff --git a/clang/unittests/Frontend/CompilerInstanceTest.cpp b/clang/unittests/Frontend/CompilerInstanceTest.cpp index 36cac5a..cd3fefa 100644 --- a/clang/unittests/Frontend/CompilerInstanceTest.cpp +++ b/clang/unittests/Frontend/CompilerInstanceTest.cpp @@ -33,7 +33,7 @@ TEST(CompilerInstance, DefaultVFSOverlayFromInvocation) { SmallString<256> CurrentPath; sys::fs::current_path(CurrentPath); - sys::fs::make_absolute(CurrentPath, FileName); + sys::path::make_absolute(CurrentPath, FileName); // Mount the VFS file itself on the path 'virtual.file'. Makes this test // a bit shorter than creating a new dummy file just for this purpose. 
diff --git a/clang/unittests/Lex/LexHLSLRootSignatureTest.cpp b/clang/unittests/Lex/LexHLSLRootSignatureTest.cpp index 01f8d4f..82f1968 100644 --- a/clang/unittests/Lex/LexHLSLRootSignatureTest.cpp +++ b/clang/unittests/Lex/LexHLSLRootSignatureTest.cpp @@ -226,6 +226,9 @@ TEST_F(LexHLSLRootSignatureTest, ValidLexAllTokensTest) { STATIC_BORDER_COLOR_OPAQUE_WHITE STATIC_BORDER_COLOR_OPAQUE_BLACK_UINT STATIC_BORDER_COLOR_OPAQUE_WHITE_UINT + + UINT_BORDER_COLOR + NON_NORMALIZED_COORDINATES )cc"; hlsl::RootSignatureLexer Lexer(Source); diff --git a/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp b/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp index 9b9f5dd..f7e9d2d 100644 --- a/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp +++ b/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp @@ -263,7 +263,8 @@ TEST_F(ParseHLSLRootSignatureTest, ValidParseStaticSamplerTest) { filter = FILTER_MAXIMUM_MIN_POINT_MAG_LINEAR_MIP_POINT, maxLOD = 9000, addressU = TEXTURE_ADDRESS_MIRROR, comparisonFunc = COMPARISON_NOT_EQUAL, - borderColor = STATIC_BORDER_COLOR_OPAQUE_BLACK_UINT + borderColor = STATIC_BORDER_COLOR_OPAQUE_BLACK_UINT, + flags = 0 ) )cc"; @@ -336,6 +337,37 @@ TEST_F(ParseHLSLRootSignatureTest, ValidParseStaticSamplerTest) { ASSERT_TRUE(Consumer->isSatisfied()); } +TEST_F(ParseHLSLRootSignatureTest, ValidStaticSamplerFlagsTest) { + const llvm::StringLiteral Source = R"cc( + StaticSampler(s0, flags = UINT_BORDER_COLOR | NON_NORMALIZED_COORDINATES) + )cc"; + + auto Ctx = createMinimalASTContext(); + StringLiteral *Signature = wrapSource(Ctx, Source); + + TrivialModuleLoader ModLoader; + auto PP = createPP(Source, ModLoader); + + hlsl::RootSignatureParser Parser(RootSignatureVersion::V1_1, Signature, *PP); + + // Test no diagnostics produced + Consumer->setNoDiag(); + + ASSERT_FALSE(Parser.parse()); + + auto Elements = Parser.getElements(); + ASSERT_EQ(Elements.size(), 1u); + + RootElement Elem = Elements[0].getElement(); + 
ASSERT_TRUE(std::holds_alternative<StaticSampler>(Elem)); + auto ValidStaticSamplerFlags = + llvm::dxbc::StaticSamplerFlags::NonNormalizedCoordinates | + llvm::dxbc::StaticSamplerFlags::UintBorderColor; + ASSERT_EQ(std::get<StaticSampler>(Elem).Flags, ValidStaticSamplerFlags); + + ASSERT_TRUE(Consumer->isSatisfied()); +} + TEST_F(ParseHLSLRootSignatureTest, ValidParseFloatsTest) { const llvm::StringLiteral Source = R"cc( StaticSampler(s0, mipLODBias = 0), diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp index f73b0aecc..74f29ac 100644 --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -133,28 +133,20 @@ static BasicType ParseBasicType(char c) { switch (c) { case 'c': return BasicType::Int8; - break; case 's': return BasicType::Int16; - break; case 'i': return BasicType::Int32; - break; case 'l': return BasicType::Int64; - break; case 'x': return BasicType::Float16; - break; case 'f': return BasicType::Float32; - break; case 'd': return BasicType::Float64; - break; case 'y': return BasicType::BFloat16; - break; default: return BasicType::Unknown; } diff --git a/clang/utils/perf-training/CMakeLists.txt b/clang/utils/perf-training/CMakeLists.txt index 1d7bb78..2cd4c4c 100644 --- a/clang/utils/perf-training/CMakeLists.txt +++ b/clang/utils/perf-training/CMakeLists.txt @@ -6,6 +6,10 @@ set(CLANG_PGO_TRAINING_DATA "${CMAKE_CURRENT_SOURCE_DIR}" CACHE PATH set(CLANG_PGO_TRAINING_DATA_SOURCE_DIR OFF CACHE STRING "Path to source directory containing cmake project with source files to use for generating pgo data") set(CLANG_PGO_TRAINING_DEPS "" CACHE STRING "Extra dependencies needed to build the PGO training data.") +add_custom_target(clear-perf-data + COMMAND "${Python3_EXECUTABLE}" ${CMAKE_CURRENT_SOURCE_DIR}/perf-helper.py clean ${CMAKE_CURRENT_BINARY_DIR} perf.data + COMMENT "Clearing old perf data") + option(CLANG_PGO_TRAINING_USE_LLVM_BUILD "Use LLVM build for generating PGO data" ON) 
llvm_canonicalize_cmake_booleans( @@ -21,7 +25,7 @@ if(LLVM_BUILD_INSTRUMENTED) add_lit_testsuite(generate-profraw "Generating clang PGO data" ${CMAKE_CURRENT_BINARY_DIR}/pgo-data/ EXCLUDE_FROM_CHECK_ALL - DEPENDS clear-profraw + DEPENDS clear-profraw clang ) add_custom_target(clear-profraw @@ -55,6 +59,32 @@ if(LLVM_BUILD_INSTRUMENTED) USE_TOOLCHAIN EXLUDE_FROM_ALL NO_INSTALL DEPENDS generate-profraw) add_dependencies(generate-profdata generate-profraw-external) endif() + + if(NOT LLVM_PROFGEN) + find_program(LLVM_PROFGEN llvm-profgen) + endif() + + if(NOT LLVM_PROFGEN) + message(STATUS "To enable converting CSSPGO samples LLVM_PROFGEN has to point to llvm-profgen") + elseif(NOT CLANG_PGO_TRAINING_DATA_SOURCE_DIR) + message(STATUS "CLANG_PGO_TRAINING_DATA_SOURCE_DIR must be set to collect CSSPGO samples") + else() + set(PERF_HELPER "${Python3_EXECUTABLE}" ${CMAKE_CURRENT_SOURCE_DIR}/perf-helper.py) + set(CLANG_SPROFDATA ${CMAKE_CURRENT_BINARY_DIR}/clang.sprofdata) + add_custom_command( + OUTPUT ${CLANG_SPROFDATA} + # Execute generate-profraw-external under perf + COMMAND ${PERF_HELPER} perf --csspgo -- ${CMAKE_COMMAND} --build ${CMAKE_BINARY_DIR} --target generate-profraw-external + # Convert perf profile into profraw + COMMAND ${PERF_HELPER} perf2prof ${LLVM_PROFGEN} $<TARGET_FILE:clang> ${CMAKE_CURRENT_BINARY_DIR} + # Merge profdata + COMMAND ${PERF_HELPER} merge --sample ${LLVM_PROFDATA} ${CLANG_SPROFDATA} ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS clang ${CLANG_PGO_TRAINING_DEPS} clear-perf-data generate-profraw-external-clean + VERBATIM + USES_TERMINAL + ) + add_custom_target(generate-sprofdata DEPENDS ${CLANG_SPROFDATA}) + endif() endif() endif() @@ -104,8 +134,4 @@ if(CLANG_BOLT AND NOT LLVM_BUILD_INSTRUMENTED) COMMAND "${Python3_EXECUTABLE}" ${CMAKE_CURRENT_SOURCE_DIR}/perf-helper.py clean ${CMAKE_CURRENT_BINARY_DIR} fdata COMMENT "Clearing old BOLT fdata") - add_custom_target(clear-perf-data - COMMAND "${Python3_EXECUTABLE}" 
${CMAKE_CURRENT_SOURCE_DIR}/perf-helper.py clean ${CMAKE_CURRENT_BINARY_DIR} perf.data - COMMENT "Clearing old perf data") - endif() diff --git a/clang/utils/perf-training/perf-helper.py b/clang/utils/perf-training/perf-helper.py index ab4491d..1c7904e 100644 --- a/clang/utils/perf-training/perf-helper.py +++ b/clang/utils/perf-training/perf-helper.py @@ -45,14 +45,22 @@ def clean(args): def merge(args): - if len(args) < 3: - print( - "Usage: %s merge <llvm-profdata> <output> <paths>\n" % __file__ - + "\tMerges all profraw files from path into output." - ) - return 1 - cmd = [args[0], "merge", "-o", args[1]] - for path in args[2:]: + parser = argparse.ArgumentParser( + prog="perf-helper merge", + description="Merges all profraw files from path(s) into output", + ) + parser.add_argument("profdata", help="Path to llvm-profdata tool") + parser.add_argument("output", help="Output filename") + parser.add_argument( + "paths", nargs="+", help="Folder(s) containing input profraw files" + ) + parser.add_argument("--sample", action="store_true", help="Sample profile") + opts = parser.parse_args(args) + + cmd = [opts.profdata, "merge", "-o", opts.output] + if opts.sample: + cmd += ["--sample"] + for path in opts.paths: cmd.extend(findFilesWithExtension(path, "profraw")) subprocess.check_call(cmd) return 0 @@ -73,25 +81,30 @@ def merge_fdata(args): def perf(args): parser = argparse.ArgumentParser( - prog="perf-helper perf", description="perf wrapper for BOLT profile collection" + prog="perf-helper perf", + description="perf wrapper for BOLT/CSSPGO profile collection", ) parser.add_argument( "--lbr", action="store_true", help="Use perf with branch stacks" ) + parser.add_argument("--csspgo", action="store_true", help="Enable CSSPGO flags") parser.add_argument("cmd", nargs=argparse.REMAINDER, help="") opts = parser.parse_args(args) cmd = opts.cmd[1:] + event = "br_inst_retired.near_taken:uppp" if opts.csspgo else "cycles:u" perf_args = [ "perf", "record", - "--event=cycles:u", + 
f"--event={event}", "--freq=max", "--output=%d.perf.data" % os.getpid(), ] - if opts.lbr: + if opts.lbr or opts.csspgo: perf_args += ["--branch-filter=any,u"] + if opts.csspgo: + perf_args += ["-g", "--call-graph=fp"] perf_args.extend(cmd) start_time = time.time() @@ -127,6 +140,30 @@ def perf2bolt(args): return 0 +def perf2prof(args): + parser = argparse.ArgumentParser( + prog="perf-helper perf2prof", + description="perf to CSSPGO prof conversion wrapper", + ) + parser.add_argument("profgen", help="Path to llvm-profgen binary") + parser.add_argument("binary", help="Input binary") + parser.add_argument("paths", nargs="+", help="Path containing perf.data files") + opts = parser.parse_args(args) + + profgen_args = [opts.profgen, f"--binary={opts.binary}"] + for path in opts.paths: + for filename in findFilesWithExtension(path, "perf.data"): + subprocess.run( + [ + *profgen_args, + f"--perfdata={filename}", + f"--output={filename}.profraw", + ], + check=True, + ) + return 0 + + def dtrace(args): parser = argparse.ArgumentParser( prog="perf-helper dtrace", @@ -660,7 +697,10 @@ def bolt_optimize(args): process.check_returncode() if opts.method in ["PERF", "LBR"]: - perf2bolt([opts.bolt, opts.perf_training_binary_dir, opts.input]) + args = [opts.bolt, opts.perf_training_binary_dir, opts.input] + if opts.method == "LBR": + args.extend("--lbr") + perf2bolt(args) merge_fdata([opts.merge_fdata, opts.fdata, opts.perf_training_binary_dir]) @@ -707,6 +747,7 @@ commands = { "merge-fdata": merge_fdata, "perf": perf, "perf2bolt": perf2bolt, + "perf2prof": perf2prof, } diff --git a/compiler-rt/lib/builtins/CMakeLists.txt b/compiler-rt/lib/builtins/CMakeLists.txt index 0d7fc65..9095b05 100644 --- a/compiler-rt/lib/builtins/CMakeLists.txt +++ b/compiler-rt/lib/builtins/CMakeLists.txt @@ -816,14 +816,15 @@ set(s390x_SOURCES ${GENERIC_TF_SOURCES} ) -set(wasm32_SOURCES - ${GENERIC_TF_SOURCES} - ${GENERIC_SOURCES} -) -set(wasm64_SOURCES + +set(wasm_SOURCES + wasm/__c_longjmp.S + 
wasm/__cpp_exception.S ${GENERIC_TF_SOURCES} ${GENERIC_SOURCES} ) +set(wasm32_SOURCES ${wasm_SOURCES}) +set(wasm64_SOURCES ${wasm_SOURCES}) set(ve_SOURCES ve/grow_stack.S diff --git a/compiler-rt/lib/builtins/wasm/__c_longjmp.S b/compiler-rt/lib/builtins/wasm/__c_longjmp.S new file mode 100644 index 0000000..d130862 --- /dev/null +++ b/compiler-rt/lib/builtins/wasm/__c_longjmp.S @@ -0,0 +1,26 @@ +//===-- __c_longjmp.S - Implement __c_longjmp -----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements __c_longjmp which LLVM uses to implement setjmp/longjmp +// when Wasm EH is enabled. +// +//===----------------------------------------------------------------------===// + +#ifdef __wasm_exception_handling__ + +#ifdef __wasm64__ +#define PTR i64 +#else +#define PTR i32 +#endif + +.globl __c_longjmp +.tagtype __c_longjmp PTR +__c_longjmp: + +#endif // !__wasm_exception_handling__ diff --git a/compiler-rt/lib/builtins/wasm/__cpp_exception.S b/compiler-rt/lib/builtins/wasm/__cpp_exception.S new file mode 100644 index 0000000..0496e1d --- /dev/null +++ b/compiler-rt/lib/builtins/wasm/__cpp_exception.S @@ -0,0 +1,26 @@ +//===-- __cpp_exception.S - Implement __cpp_exception ---------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements __cpp_exception which LLVM uses to implement exception +// handling when Wasm EH is enabled. 
+// +//===----------------------------------------------------------------------===// + +#ifdef __wasm_exception_handling__ + +#ifdef __wasm64__ +#define PTR i64 +#else +#define PTR i32 +#endif + +.globl __cpp_exception +.tagtype __cpp_exception PTR +__cpp_exception: + +#endif // !__wasm_exception_handling__ diff --git a/compiler-rt/test/asan/TestCases/wcscat.cpp b/compiler-rt/test/asan/TestCases/wcscat.cpp index dcdff88..f0a8ec1 100644 --- a/compiler-rt/test/asan/TestCases/wcscat.cpp +++ b/compiler-rt/test/asan/TestCases/wcscat.cpp @@ -1,26 +1,26 @@ -// RUN: %clangxx_asan -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK -// RUN: %clangxx_asan -O1 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK -// RUN: %clangxx_asan -O2 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK -// RUN: %clangxx_asan -O3 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O0 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O1 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O2 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O3 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK #include <stdio.h> #include <wchar.h> int main() { - wchar_t *start = L"X means "; - wchar_t *append = L"dog"; + const wchar_t *start = L"X means "; + const wchar_t *append = L"dog"; wchar_t goodDst[12]; wcscpy(goodDst, start); wcscat(goodDst, append); wchar_t badDst[9]; wcscpy(badDst, start); - printf("Good so far.\n"); + fprintf(stderr, "Good so far.\n"); // CHECK: Good so far. - fflush(stdout); + fflush(stderr); wcscat(badDst, append); // Boom! 
// CHECK: ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}} - // CHECK: WRITE of size {{[0-9]+}} at [[ADDR:0x[0-9a-f]+]] thread T0 - // CHECK: #0 [[ADDR:0x[0-9a-f]+]] in wcscat{{.*}}sanitizer_common_interceptors.inc:{{[0-9]+}} + // CHECK: WRITE of size {{[0-9]+}} at [[ADDR]] thread T0 + // CHECK: #0 {{0x[0-9a-f]+}} in wcscat printf("Should have failed with ASAN error.\n"); }
\ No newline at end of file diff --git a/compiler-rt/test/asan/TestCases/wcscpy.cpp b/compiler-rt/test/asan/TestCases/wcscpy.cpp index 414d833..a280d29 100644 --- a/compiler-rt/test/asan/TestCases/wcscpy.cpp +++ b/compiler-rt/test/asan/TestCases/wcscpy.cpp @@ -1,23 +1,23 @@ -// RUN: %clangxx_asan -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK -// RUN: %clangxx_asan -O1 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK -// RUN: %clangxx_asan -O2 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK -// RUN: %clangxx_asan -O3 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O0 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O1 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O2 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O3 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK #include <stdio.h> #include <wchar.h> int main() { - wchar_t *src = L"X means dog"; + const wchar_t *src = L"X means dog"; wchar_t goodDst[12]; wcscpy(goodDst, src); wchar_t badDst[7]; - printf("Good so far.\n"); + fprintf(stderr, "Good so far.\n"); // CHECK: Good so far. - fflush(stdout); + fflush(stderr); wcscpy(badDst, src); // Boom! 
- // CHECK:ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}} - // CHECK: WRITE of size {{[0-9]+}} at [[ADDR:0x[0-9a-f]+]] thread T0 - // CHECK: #0 [[ADDR:0x[0-9a-f]+]] in wcscpy{{.*}}asan_interceptors.cpp:{{[0-9]+}} + // CHECK: ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}} + // CHECK: WRITE of size {{[0-9]+}} at [[ADDR]] thread T0 + // CHECK: #0 {{0x[0-9a-f]+}} in wcscpy printf("Should have failed with ASAN error.\n"); }
\ No newline at end of file diff --git a/compiler-rt/test/asan/TestCases/wcsncat.cpp b/compiler-rt/test/asan/TestCases/wcsncat.cpp index 3ab7fc8..eb7d095 100644 --- a/compiler-rt/test/asan/TestCases/wcsncat.cpp +++ b/compiler-rt/test/asan/TestCases/wcsncat.cpp @@ -1,14 +1,14 @@ -// RUN: %clangxx_asan -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK -// RUN: %clangxx_asan -O1 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK -// RUN: %clangxx_asan -O2 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK -// RUN: %clangxx_asan -O3 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O0 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O1 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O2 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O3 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK #include <stdio.h> #include <wchar.h> int main() { - wchar_t *start = L"X means "; - wchar_t *append = L"dog"; + const wchar_t *start = L"X means "; + const wchar_t *append = L"dog"; wchar_t goodDst[15]; wcscpy(goodDst, start); wcsncat(goodDst, append, 5); @@ -16,12 +16,12 @@ int main() { wchar_t badDst[11]; wcscpy(badDst, start); wcsncat(badDst, append, 1); - printf("Good so far.\n"); + fprintf(stderr, "Good so far.\n"); // CHECK: Good so far. - fflush(stdout); + fflush(stderr); wcsncat(badDst, append, 3); // Boom! 
// CHECK: ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}} - // CHECK: WRITE of size {{[0-9]+}} at [[ADDR:0x[0-9a-f]+]] thread T0 - // CHECK: #0 [[ADDR:0x[0-9a-f]+]] in wcsncat{{.*}}sanitizer_common_interceptors.inc:{{[0-9]+}} + // CHECK: WRITE of size {{[0-9]+}} at [[ADDR]] thread T0 + // CHECK: #0 {{0x[0-9a-f]+}} in wcsncat printf("Should have failed with ASAN error.\n"); }
\ No newline at end of file diff --git a/compiler-rt/test/asan/TestCases/wcsncpy.cpp b/compiler-rt/test/asan/TestCases/wcsncpy.cpp index 6177b72..1106bf5 100644 --- a/compiler-rt/test/asan/TestCases/wcsncpy.cpp +++ b/compiler-rt/test/asan/TestCases/wcsncpy.cpp @@ -1,25 +1,25 @@ -// RUN: %clangxx_asan -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK -// RUN: %clangxx_asan -O1 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK -// RUN: %clangxx_asan -O2 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK -// RUN: %clangxx_asan -O3 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O0 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O1 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O2 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O3 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK #include <stdio.h> #include <wchar.h> int main() { - wchar_t *src = L"X means dog"; + const wchar_t *src = L"X means dog"; wchar_t goodDst[12]; wcsncpy(goodDst, src, 12); wchar_t badDst[7]; wcsncpy(badDst, src, 7); // This should still work. - printf("Good so far.\n"); + fprintf(stderr, "Good so far.\n"); // CHECK: Good so far. - fflush(stdout); + fflush(stderr); wcsncpy(badDst, src, 15); // Boom! 
- // CHECK:ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}} - // CHECK: WRITE of size {{[0-9]+}} at [[ADDR:0x[0-9a-f]+]] thread T0 - // CHECK: #0 [[ADDR:0x[0-9a-f]+]] in wcsncpy{{.*}}asan_interceptors.cpp:{{[0-9]+}} + // CHECK: ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}} + // CHECK: WRITE of size {{[0-9]+}} at [[ADDR]] thread T0 + // CHECK: #0 {{0x[0-9a-f]+}} in wcsncpy printf("Should have failed with ASAN error.\n"); }
\ No newline at end of file diff --git a/flang-rt/lib/runtime/derived-api.cpp b/flang-rt/lib/runtime/derived-api.cpp index bb08e03..fe68682 100644 --- a/flang-rt/lib/runtime/derived-api.cpp +++ b/flang-rt/lib/runtime/derived-api.cpp @@ -118,14 +118,26 @@ bool RTDEF(SameTypeAs)(const Descriptor &a, const Descriptor &b) { } bool RTDEF(ExtendsTypeOf)(const Descriptor &a, const Descriptor &mold) { + // The wording of the standard indicates null or unallocated checks take + // precedence over the extension checks which take precedence over any + // compiler specific behavior. + // F'23 16.9.86 p 5 + // If MOLD is unlimited polymorphic and is either a disassociated pointer or + // unallocated allocatable variable, the result is true; auto aType{a.raw().type}; auto moldType{mold.raw().type}; if ((aType != CFI_type_struct && aType != CFI_type_other) || (moldType != CFI_type_struct && moldType != CFI_type_other)) { - // If either type is intrinsic, they must match. - return aType == moldType; - } else if (const typeInfo::DerivedType * - derivedTypeMold{GetDerivedType(mold)}) { + if (!mold.IsAllocated()) { + return true; + } else if (!a.IsAllocated()) { + return false; + } else { + // If either type is intrinsic and not a pointer or allocatable + // then they must match. + return aType == moldType; + } + } else if (const auto *derivedTypeMold{GetDerivedType(mold)}) { // If A is unlimited polymorphic and is either a disassociated pointer or // unallocated allocatable, the result is false. 
// Otherwise if the dynamic type of A or MOLD is extensible, the result is diff --git a/flang/include/flang/Semantics/openmp-utils.h b/flang/include/flang/Semantics/openmp-utils.h index 08b6716..2954a1c 100644 --- a/flang/include/flang/Semantics/openmp-utils.h +++ b/flang/include/flang/Semantics/openmp-utils.h @@ -37,6 +37,8 @@ template <typename T, typename U = std::remove_const_t<T>> U AsRvalue(T &t) { template <typename T> T &&AsRvalue(T &&t) { return std::move(t); } +const Scope &GetScopingUnit(const Scope &scope); + // There is no consistent way to get the source of an ActionStmt, but there // is "source" in Statement<T>. This structure keeps the ActionStmt with the // extracted source for further use. diff --git a/flang/lib/Semantics/check-omp-structure.cpp b/flang/lib/Semantics/check-omp-structure.cpp index e224e06..1f059f747 100644 --- a/flang/lib/Semantics/check-omp-structure.cpp +++ b/flang/lib/Semantics/check-omp-structure.cpp @@ -1361,9 +1361,19 @@ void OmpStructureChecker::Enter(const parser::OpenMPDeclareSimdConstruct &x) { return; } + auto isValidSymbol{[](const Symbol *sym) { + if (IsProcedure(*sym) || IsFunction(*sym)) { + return true; + } + if (const Symbol *owner{GetScopingUnit(sym->owner()).symbol()}) { + return IsProcedure(*owner) || IsFunction(*owner); + } + return false; + }}; + const parser::OmpArgument &arg{args.v.front()}; if (auto *sym{GetArgumentSymbol(arg)}) { - if (!IsProcedure(*sym) && !IsFunction(*sym)) { + if (!isValidSymbol(sym)) { auto &msg{context_.Say(arg.source, "The name '%s' should refer to a procedure"_err_en_US, sym->name())}; if (sym->test(Symbol::Flag::Implicit)) { diff --git a/flang/lib/Semantics/openmp-utils.cpp b/flang/lib/Semantics/openmp-utils.cpp index 35b7718..a8ec4d6 100644 --- a/flang/lib/Semantics/openmp-utils.cpp +++ b/flang/lib/Semantics/openmp-utils.cpp @@ -41,6 +41,24 @@ namespace Fortran::semantics::omp { using namespace Fortran::parser::omp; +const Scope &GetScopingUnit(const Scope &scope) { + const Scope 
*iter{&scope}; + for (; !iter->IsTopLevel(); iter = &iter->parent()) { + switch (iter->kind()) { + case Scope::Kind::BlockConstruct: + case Scope::Kind::BlockData: + case Scope::Kind::DerivedType: + case Scope::Kind::MainProgram: + case Scope::Kind::Module: + case Scope::Kind::Subprogram: + return *iter; + default: + break; + } + } + return *iter; +} + SourcedActionStmt GetActionStmt(const parser::ExecutionPartConstruct *x) { if (x == nullptr) { return SourcedActionStmt{}; diff --git a/flang/lib/Semantics/resolve-directives.cpp b/flang/lib/Semantics/resolve-directives.cpp index bd7b8ac..b1eaaa8 100644 --- a/flang/lib/Semantics/resolve-directives.cpp +++ b/flang/lib/Semantics/resolve-directives.cpp @@ -379,24 +379,6 @@ public: explicit OmpAttributeVisitor(SemanticsContext &context) : DirectiveAttributeVisitor(context) {} - static const Scope &scopingUnit(const Scope &scope) { - const Scope *iter{&scope}; - for (; !iter->IsTopLevel(); iter = &iter->parent()) { - switch (iter->kind()) { - case Scope::Kind::BlockConstruct: - case Scope::Kind::BlockData: - case Scope::Kind::DerivedType: - case Scope::Kind::MainProgram: - case Scope::Kind::Module: - case Scope::Kind::Subprogram: - return *iter; - default: - break; - } - } - return *iter; - } - template <typename A> void Walk(const A &x) { parser::Walk(x, *this); } template <typename A> bool Pre(const A &) { return true; } template <typename A> void Post(const A &) {} @@ -2303,14 +2285,17 @@ void OmpAttributeVisitor::CheckPerfectNestAndRectangularLoop( } auto checkPerfectNest = [&, this]() { - auto blockSize = block.size(); - if (blockSize <= 1) + if (block.empty()) return; + auto last = block.end(); + --last; - if (parser::Unwrap<parser::ContinueStmt>(x)) - blockSize -= 1; + // A trailing CONTINUE is not considered part of the loop body + if (parser::Unwrap<parser::ContinueStmt>(*last)) + --last; - if (blockSize <= 1) + // In a perfectly nested loop, the nested loop must be the only statement + if (last == block.begin()) 
return; // Non-perfectly nested loop @@ -3086,8 +3071,8 @@ void OmpAttributeVisitor::ResolveOmpDesignator( checkScope = ompFlag == Symbol::Flag::OmpExecutableAllocateDirective; } if (checkScope) { - if (scopingUnit(GetContext().scope) != - scopingUnit(symbol->GetUltimate().owner())) { + if (omp::GetScopingUnit(GetContext().scope) != + omp::GetScopingUnit(symbol->GetUltimate().owner())) { context_.Say(designator.source, // 2.15.3 "List items must be declared in the same scoping unit in which the %s directive appears"_err_en_US, parser::ToUpperCaseLetters( diff --git a/flang/test/Lower/OpenMP/wsloop-collapse-continue.f90 b/flang/test/Lower/OpenMP/wsloop-collapse-continue.f90 new file mode 100644 index 0000000..fea7a8b --- /dev/null +++ b/flang/test/Lower/OpenMP/wsloop-collapse-continue.f90 @@ -0,0 +1,19 @@ +! RUN: bbc -fopenmp -emit-hlfir %s -o - | FileCheck %s + +program wsloop_collapse_continue + integer i, j + +! CHECK: omp.wsloop {{.*}} { +! CHECK: omp.loop_nest ({{.*}}) : i32 = ({{.*}}) to ({{.*}}) inclusive step ({{.*}}) collapse(2) { + !$omp do collapse(2) + do 50 i = 1, 42 + do 51 j = 1, 84 +! CHECK: fir.call @_FortranAioOutputInteger32( + print *, i +! CHECK: fir.call @_FortranAioOutputInteger32( + print *, j + 51 continue + 50 continue + !$omp end do + +end program wsloop_collapse_continue diff --git a/flang/test/Semantics/OpenMP/declare-simd.f90 b/flang/test/Semantics/OpenMP/declare-simd.f90 index ceed2c2..bb259b8 100644 --- a/flang/test/Semantics/OpenMP/declare-simd.f90 +++ b/flang/test/Semantics/OpenMP/declare-simd.f90 @@ -19,4 +19,9 @@ end subroutine f01 end +integer function f02 +!Ok, expect no diagnostics +!$omp declare_simd(f02) +end + end module diff --git a/flang/test/Semantics/OpenMP/do08.f90 b/flang/test/Semantics/OpenMP/do08.f90 index bb3c1d0c..5143dff 100644 --- a/flang/test/Semantics/OpenMP/do08.f90 +++ b/flang/test/Semantics/OpenMP/do08.f90 @@ -61,7 +61,6 @@ program omp !$omp end do - !ERROR: Canonical loop nest must be perfectly nested. 
!ERROR: The value of the parameter in the COLLAPSE or ORDERED clause must not be larger than the number of nested loops following the construct. !$omp do collapse(3) do 60 i=2,200,2 diff --git a/flang/test/Semantics/OpenMP/do13.f90 b/flang/test/Semantics/OpenMP/do13.f90 index 8f7844f..6e9d1dd 100644 --- a/flang/test/Semantics/OpenMP/do13.f90 +++ b/flang/test/Semantics/OpenMP/do13.f90 @@ -59,7 +59,6 @@ program omp !$omp end do - !ERROR: Canonical loop nest must be perfectly nested. !ERROR: The value of the parameter in the COLLAPSE or ORDERED clause must not be larger than the number of nested loops following the construct. !$omp do collapse(3) do 60 i=1,10 diff --git a/libc/src/__support/macros/attributes.h b/libc/src/__support/macros/attributes.h index 145aa3b..d5ff028 100644 --- a/libc/src/__support/macros/attributes.h +++ b/libc/src/__support/macros/attributes.h @@ -81,4 +81,14 @@ LIBC_THREAD_MODE_EXTERNAL. #define LIBC_HAS_VECTOR_TYPE 0 #endif +#if __has_attribute(no_sanitize) +// Disable regular and hardware-supported ASan for functions that may +// intentionally make out-of-bounds access. Disable TSan as well, as it detects +// out-of-bounds accesses to heap memory. 
+#define LIBC_NO_SANITIZE_OOB_ACCESS \ + __attribute__((no_sanitize("address", "hwaddress", "thread"))) +#else +#define LIBC_NO_SANITIZE_OOB_ACCESS +#endif + #endif // LLVM_LIBC_SRC___SUPPORT_MACROS_ATTRIBUTES_H diff --git a/libc/src/string/CMakeLists.txt b/libc/src/string/CMakeLists.txt index b8cdb2a7..83c9564 100644 --- a/libc/src/string/CMakeLists.txt +++ b/libc/src/string/CMakeLists.txt @@ -22,6 +22,7 @@ add_header_library( libc.src.__support.CPP.type_traits libc.src.__support.CPP.simd libc.src.__support.common + libc.src.__support.macros.attributes libc.src.string.memory_utils.inline_memcpy ${string_config_options} ) diff --git a/libc/src/string/memory_utils/aarch64/inline_strlen.h b/libc/src/string/memory_utils/aarch64/inline_strlen.h index 36fd1aa..87f5ccd 100644 --- a/libc/src/string/memory_utils/aarch64/inline_strlen.h +++ b/libc/src/string/memory_utils/aarch64/inline_strlen.h @@ -17,7 +17,7 @@ namespace LIBC_NAMESPACE_DECL { namespace neon { -[[gnu::no_sanitize_address]] [[maybe_unused]] LIBC_INLINE static size_t +[[maybe_unused]] LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE static size_t string_length(const char *src) { using Vector __attribute__((may_alias)) = uint8x8_t; diff --git a/libc/src/string/memory_utils/generic/inline_strlen.h b/libc/src/string/memory_utils/generic/inline_strlen.h index d7435af..69700e8 100644 --- a/libc/src/string/memory_utils/generic/inline_strlen.h +++ b/libc/src/string/memory_utils/generic/inline_strlen.h @@ -24,8 +24,7 @@ LIBC_INLINE constexpr cpp::simd_mask<char> shift_mask(cpp::simd_mask<char> m, return cpp::bit_cast<cpp::simd_mask<char>>(r); } -[[clang::no_sanitize("address")]] LIBC_INLINE size_t -string_length(const char *src) { +LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE size_t string_length(const char *src) { constexpr cpp::simd<char> null_byte = cpp::splat('\0'); size_t alignment = alignof(cpp::simd<char>); diff --git a/libc/src/string/memory_utils/x86_64/inline_strlen.h b/libc/src/string/memory_utils/x86_64/inline_strlen.h 
index 739f8c1..9e10d58 100644 --- a/libc/src/string/memory_utils/x86_64/inline_strlen.h +++ b/libc/src/string/memory_utils/x86_64/inline_strlen.h @@ -18,12 +18,12 @@ namespace LIBC_NAMESPACE_DECL { namespace string_length_internal { // Return a bit-mask with the nth bit set if the nth-byte in block_ptr is zero. template <typename Vector, typename Mask> -[[gnu::no_sanitize_address]] LIBC_INLINE static Mask +LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE static Mask compare_and_mask(const Vector *block_ptr); template <typename Vector, typename Mask, decltype(compare_and_mask<Vector, Mask>)> -[[gnu::no_sanitize_address]] LIBC_INLINE static size_t +LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE static size_t string_length_vector(const char *src) { uintptr_t misalign_bytes = reinterpret_cast<uintptr_t>(src) % sizeof(Vector); diff --git a/libc/src/string/string_utils.h b/libc/src/string/string_utils.h index 9d636d0..7feef56 100644 --- a/libc/src/string/string_utils.h +++ b/libc/src/string/string_utils.h @@ -19,6 +19,7 @@ #include "hdr/types/size_t.h" #include "src/__support/CPP/bitset.h" #include "src/__support/CPP/type_traits.h" // cpp::is_same_v +#include "src/__support/macros/attributes.h" #include "src/__support/macros/config.h" #include "src/__support/macros/optimization.h" // LIBC_UNLIKELY #include "src/string/memory_utils/inline_memcpy.h" @@ -119,7 +120,7 @@ template <typename T> LIBC_INLINE size_t string_length(const T *src) { } template <typename Word> -[[gnu::no_sanitize_address]] LIBC_INLINE void * +LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE void * find_first_character_wide_read(const unsigned char *src, unsigned char ch, size_t n) { const unsigned char *char_ptr = src; diff --git a/libcxx/utils/find-rerun-candidates b/libcxx/utils/find-rerun-candidates new file mode 100755 index 0000000..5ac2644 --- /dev/null +++ b/libcxx/utils/find-rerun-candidates @@ -0,0 +1,242 @@ +#!/usr/bin/env python3 + +import argparse +import datetime +import functools +import os +import pathlib 
+import re +import statistics +import subprocess +import sys + +import git +import pandas +import tqdm + +@functools.total_ordering +class Commit: + """ + This class represents a commit inside a given Git repository. + """ + + def __init__(self, git_repo, sha): + self._git_repo = git_repo + self._sha = sha + + def __eq__(self, other): + """ + Return whether two commits refer to the same commit. + + This doesn't take into account the content of the Git tree at those commits, only the + 'identity' of the commits themselves. + """ + return self.fullrev == other.fullrev + + def __lt__(self, other): + """ + Return whether a commit is an ancestor of another commit in the Git repository. + """ + # Is self._sha an ancestor of other._sha? + res = subprocess.run(['git', '-C', self._git_repo, 'merge-base', '--is-ancestor', self._sha, other._sha]) + if res.returncode not in (0, 1): + raise RuntimeError(f'Error when trying to obtain the commit order for {self._sha} and {other._sha}') + return res.returncode == 0 + + def __hash__(self): + """ + Return the full revision for this commit. + """ + return hash(self.fullrev) + + @functools.cache + def show(self, include_diff=False): + """ + Return the commit information equivalent to `git show` associated to this commit. + """ + cmd = ['git', '-C', self._git_repo, 'show', self._sha] + if not include_diff: + cmd.append('--no-patch') + return subprocess.check_output(cmd, text=True) + + @functools.cached_property + def shortrev(self): + """ + Return the shortened version of the given SHA. + """ + return subprocess.check_output(['git', '-C', self._git_repo, 'rev-parse', '--short', self._sha], text=True).strip() + + @functools.cached_property + def fullrev(self): + """ + Return the full SHA associated to this commit. 
+ """ + return subprocess.check_output(['git', '-C', self._git_repo, 'rev-parse', self._sha], text=True).strip() + + @functools.cached_property + def commit_date(self): + """ + Return the date of the commit as a `datetime.datetime` object. + """ + repo = git.Repo(self._git_repo) + return datetime.datetime.fromtimestamp(repo.commit(self._sha).committed_date) + + def prefetch(self): + """ + Prefetch cached properties associated to this commit object. + + This makes it possible to control when time is spent recovering that information from Git for + e.g. better reporting to the user. + """ + self.commit_date + self.fullrev + self.shortrev + self.show() + + def __str__(self): + return self._sha + +def directory_path(string): + if os.path.isdir(string): + return pathlib.Path(string) + else: + raise NotADirectoryError(string) + +def parse_lnt(lines, aggregate=statistics.median): + """ + Parse lines in LNT format and return a list of dictionnaries of the form: + + [ + { + 'benchmark': <benchmark1>, + <metric1>: [float], + <metric2>: [float], + 'data_points': int, + ... + }, + { + 'benchmark': <benchmark2>, + <metric1>: [float], + <metric2>: [float], + 'data_points': int, + ... + }, + ... + ] + + If a metric has multiple values associated to it, they are aggregated into a single + value using the provided aggregation function. 
+ """ + results = {} + for line in lines: + line = line.strip() + if not line: + continue + + (identifier, value) = line.split(' ') + (benchmark, metric) = identifier.split('.') + if benchmark not in results: + results[benchmark] = {'benchmark': benchmark} + + entry = results[benchmark] + if metric not in entry: + entry[metric] = [] + entry[metric].append(float(value)) + + for (bm, entry) in results.items(): + metrics = [key for key in entry if isinstance(entry[key], list)] + min_data_points = min(len(entry[metric]) for metric in metrics) + for metric in metrics: + entry[metric] = aggregate(entry[metric]) + entry['data_points'] = min_data_points + + return list(results.values()) + +def sorted_revlist(git_repo, commits): + """ + Return the list of commits sorted by their chronological order (from oldest to newest) in the + provided Git repository. Items earlier in the list are older than items later in the list. + """ + revlist_cmd = ['git', '-C', git_repo, 'rev-list', '--no-walk'] + list(commits) + revlist = subprocess.check_output(revlist_cmd, text=True).strip().splitlines() + return list(reversed(revlist)) + +def main(argv): + parser = argparse.ArgumentParser( + prog='find-rerun-candidates', + description='Find benchmarking data points that are good candidates for additional runs, to reduce noise.') + parser.add_argument('directory', type=directory_path, + help='Path to a valid directory containing benchmark data in LNT format, each file being named <commit>.lnt. ' + 'This is also the format generated by the `benchmark-historical` utility.') + parser.add_argument('--metric', type=str, default='execution_time', + help='The metric to analyze. LNT data may contain multiple metrics (e.g. code size, execution time, etc) -- ' + 'this option allows selecting which metric is analyzed for rerun candidates. 
The default is "execution_time".') + parser.add_argument('--filter', type=str, required=False, + help='An optional regular expression used to filter the benchmarks included in the analysis. ' + 'Only benchmarks whose names match the regular expression will be analyzed.') + parser.add_argument('--outlier-threshold', metavar='FLOAT', type=float, default=0.1, + help='Relative difference from the previous points for considering a data point as an outlier. This threshold is ' + 'expressed as a floating point number, e.g. 0.25 will detect points that differ by more than 25%% from their ' + 'previous result.') + parser.add_argument('--data-points-threshold', type=int, required=False, + help='Number of data points above which an outlier is not considered an outlier. If an outlier has more than ' + 'that number of data points yet its relative difference is above the threshold, it is not considered an ' + 'outlier. This can be used to re-run noisy data points until we have at least N samples, at which point ' + 'we consider the data to be accurate, even if the result is beyond the threshold. By default, there is ' + 'no limit on the number of data points.') + parser.add_argument('--git-repo', type=directory_path, default=pathlib.Path(os.getcwd()), + help='Path to the git repository to use for ordering commits in time. ' + 'By default, the current working directory is used.') + args = parser.parse_args(argv) + + # Extract benchmark data from the directory. + data = {} + files = [f for f in args.directory.glob('*.lnt')] + for file in tqdm.tqdm(files, desc='Parsing LNT files'): + rows = parse_lnt(file.read_text().splitlines()) + (commit, _) = os.path.splitext(os.path.basename(file)) + commit = Commit(args.git_repo, commit) + data[commit] = rows + + # Obtain commit information which is then cached throughout the program. Do this + # eagerly so we can provide a progress bar. 
+ for commit in tqdm.tqdm(data.keys(), desc='Prefetching Git information'): + commit.prefetch() + + # Create a dataframe from the raw data and add some columns to it: + # - 'commit' represents the Commit object associated to the results in that row + # - `revlist_order` represents the order of the commit within the Git repository. + revlist = sorted_revlist(args.git_repo, [c.fullrev for c in data.keys()]) + data = pandas.DataFrame([row | {'commit': c} for (c, rows) in data.items() for row in rows]) + data = data.join(pandas.DataFrame([{'revlist_order': revlist.index(c.fullrev)} for c in data['commit']])) + + # Filter the benchmarks if needed. + if args.filter is not None: + keeplist = [b for b in data['benchmark'] if re.search(args.filter, b) is not None] + data = data[data['benchmark'].isin(keeplist)] + + # Detect outliers by selecting all benchmarks whose change percentage is beyond the threshold. + # If we have a max number of points, also take that into account. + if args.data_points_threshold is not None: + print(f'Generating outliers with more than {args.outlier_threshold * 100}% relative difference and less than {args.data_points_threshold} data points') + else: + print(f'Generating outliers with more than {args.outlier_threshold * 100}% relative difference') + + overall = set() + for (benchmark, series) in data.sort_values(by='revlist_order').groupby('benchmark'): + pct_change = series[args.metric].pct_change() + outliers = series[pct_change.abs() > args.outlier_threshold] + if args.data_points_threshold is not None: + outliers = outliers[outliers['data_points'] < args.data_points_threshold] + outliers = set(outliers['commit']) + overall |= outliers + if len(outliers) > 0: + print(f'{benchmark}: {" ".join(c.shortrev for c in outliers)}') + + if len(overall) > 0: + print(f'Summary: {" ".join(c.shortrev for c in overall)}') + else: + print(f'No outliers') + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/libcxx/utils/visualize-historical 
b/libcxx/utils/visualize-historical index ef28e8b..114c7e8 100755 --- a/libcxx/utils/visualize-historical +++ b/libcxx/utils/visualize-historical @@ -213,13 +213,6 @@ def main(argv): 'Since the chart is interactive, it generally makes most sense to include all the benchmarks ' 'and to then filter them in the browser, but in some cases producing a chart with a reduced ' 'number of data series is useful.') - parser.add_argument('--find-outliers', metavar='FLOAT', type=float, required=False, - help='Instead of building a chart, detect commits that show a large spike (more than the given relative threshold) ' - 'with the previous result and print those to standard output. This can be used to generate a list of ' - 'potential outliers that we might want to re-generate the data for. The threshold is expressed as a ' - 'floating point number, e.g. 0.25 will detect points that differ by more than 25%% from their previous ' - 'result. This option respects --filter, i.e. only benchmarks that match the filter will be analyzed for ' - 'outliers.') parser.add_argument('--subtitle', type=str, required=False, help='Optional subtitle for the chart. This can be used to help identify the contents of the chart.') parser.add_argument('--git-repo', type=directory_path, default=pathlib.Path(os.getcwd()), @@ -258,16 +251,6 @@ def main(argv): keeplist = [b for b in data['benchmark'] if re.search(args.filter, b) is not None] data = data[data['benchmark'].isin(keeplist)] - # If requested, perform a basic pass to detect outliers. - # Note that we consider a commit to be an outlier if any of the benchmarks for that commit is an outlier. 
- if args.find_outliers is not None: - threshold = args.find_outliers - outliers = set() - for (benchmark, series) in data.sort_values(by='revlist_order').groupby('benchmark'): - outliers |= set(series[series[args.metric].pct_change() > threshold]['commit']) - print(f'Outliers (more than {threshold * 100}%): {" ".join(c.shortrev for c in outliers)}') - return - # Plot the data for all the required benchmarks. figure = create_plot(data, args.metric, subtitle=args.subtitle) do_open = args.output is None or args.open diff --git a/libunwind/test/configs/cmake-bridge.cfg.in b/libunwind/test/configs/cmake-bridge.cfg.in index b804c21..e40497b 100644 --- a/libunwind/test/configs/cmake-bridge.cfg.in +++ b/libunwind/test/configs/cmake-bridge.cfg.in @@ -14,6 +14,7 @@ import os, site site.addsitedir(os.path.join('@LIBUNWIND_LIBCXX_PATH@', 'utils')) import libcxx.test.format +from lit.util import which # Basic configuration of the test suite config.name = os.path.basename('@LIBUNWIND_TEST_CONFIG@') @@ -33,3 +34,13 @@ config.substitutions.append(('%{install-prefix}', '@LIBUNWIND_TESTING_INSTALL_PR config.substitutions.append(('%{include}', '@LIBUNWIND_TESTING_INSTALL_PREFIX@/include')) config.substitutions.append(('%{lib}', '@LIBUNWIND_TESTING_INSTALL_PREFIX@/@LIBUNWIND_INSTALL_LIBRARY_DIR@')) config.substitutions.append(('%{benchmark_flags}', '')) + +# Check for objcopy tools +objcopy_path = which('llvm-objcopy', '@LLVM_BUILD_BINARY_DIR@/bin') +if not objcopy_path: + objcopy_path = which('llvm-objcopy') +if not objcopy_path: + objcopy_path = which('objcopy') +if objcopy_path: + config.substitutions.append(('%{objcopy}', objcopy_path)) + config.available_features.add('objcopy-available') diff --git a/libunwind/test/eh_frame_fde_pc_range.pass.cpp b/libunwind/test/eh_frame_fde_pc_range.pass.cpp index 39c8e80..852612b 100644 --- a/libunwind/test/eh_frame_fde_pc_range.pass.cpp +++ b/libunwind/test/eh_frame_fde_pc_range.pass.cpp @@ -14,16 +14,15 @@ // clang-format off // REQUIRES: 
target={{x86_64-.+-linux-gnu}} -// aarch64,arm have a cross toolchain build(llvm-clang-win-x-aarch64, etc) -// where objdump is not available. +// REQUIRES: objcopy-available // TODO: Figure out why this fails with Memory Sanitizer. // XFAIL: msan // RUN: %{build} -// RUN: objcopy --dump-section .eh_frame_hdr=%t_ehf_hdr.bin %t.exe +// RUN: %{objcopy} --dump-section .eh_frame_hdr=%t_ehf_hdr.bin %t.exe // RUN: echo -ne '\xFF' | dd of=%t_ehf_hdr.bin bs=1 seek=2 count=2 conv=notrunc status=none -// RUN: objcopy --update-section .eh_frame_hdr=%t_ehf_hdr.bin %t.exe +// RUN: %{objcopy} --update-section .eh_frame_hdr=%t_ehf_hdr.bin %t.exe // RUN: %{exec} %t.exe // clang-format on diff --git a/lldb/include/lldb/Core/Mangled.h b/lldb/include/lldb/Core/Mangled.h index 665accb..546d7a9b 100644 --- a/lldb/include/lldb/Core/Mangled.h +++ b/lldb/include/lldb/Core/Mangled.h @@ -148,13 +148,7 @@ public: /// Mangled name get accessor. /// /// \return - /// A reference to the mangled name string object. - ConstString &GetMangledName() { return m_mangled; } - - /// Mangled name get accessor. - /// - /// \return - /// A const reference to the mangled name string object. + /// The mangled name string object. ConstString GetMangledName() const { return m_mangled; } /// Best name get accessor. 
diff --git a/lldb/include/lldb/Target/Statistics.h b/lldb/include/lldb/Target/Statistics.h index d6983bb..26538352 100644 --- a/lldb/include/lldb/Target/Statistics.h +++ b/lldb/include/lldb/Target/Statistics.h @@ -322,12 +322,14 @@ public: void IncreaseSourceRealpathCompatibleCount(uint32_t count); StatsDuration &GetCreateTime() { return m_create_time; } + StatsDuration &GetLoadCoreTime() { return m_load_core_time; } StatsSuccessFail &GetExpressionStats() { return m_expr_eval; } StatsSuccessFail &GetFrameVariableStats() { return m_frame_var; } void Reset(Target &target); protected: StatsDuration m_create_time; + StatsDuration m_load_core_time; std::optional<StatsTimepoint> m_launch_or_attach_time; std::optional<StatsTimepoint> m_first_private_stop_time; std::optional<StatsTimepoint> m_first_public_stop_time; diff --git a/lldb/source/API/SBTarget.cpp b/lldb/source/API/SBTarget.cpp index eb56337..90ffe78 100644 --- a/lldb/source/API/SBTarget.cpp +++ b/lldb/source/API/SBTarget.cpp @@ -255,6 +255,7 @@ SBProcess SBTarget::LoadCore(const char *core_file, lldb::SBError &error) { ProcessSP process_sp(target_sp->CreateProcess( target_sp->GetDebugger().GetListener(), "", &filespec, false)); if (process_sp) { + ElapsedTime loadCoreTime(target_sp->GetStatistics().GetLoadCoreTime()); error.SetError(process_sp->LoadCore()); if (error.Success()) sb_process.SetSP(process_sp); diff --git a/lldb/source/Commands/CommandObjectTarget.cpp b/lldb/source/Commands/CommandObjectTarget.cpp index 940be42..b5fc49d 100644 --- a/lldb/source/Commands/CommandObjectTarget.cpp +++ b/lldb/source/Commands/CommandObjectTarget.cpp @@ -418,7 +418,11 @@ protected: if (process_sp) { // Seems weird that we Launch a core file, but that is what we // do! 
- error = process_sp->LoadCore(); + { + ElapsedTime loadCoreTime( + target_sp->GetStatistics().GetLoadCoreTime()); + error = process_sp->LoadCore(); + } if (error.Fail()) { result.AppendError(error.AsCString("unknown core file format")); diff --git a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp index fada1fd..91c93be 100644 --- a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp +++ b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp @@ -2805,32 +2805,29 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { is_gsym = true; sym[sym_idx].SetExternal(true); - if (symbol_name && symbol_name[0] == '_' && - symbol_name[1] == 'O') { - llvm::StringRef symbol_name_ref(symbol_name); - if (symbol_name_ref.starts_with( - g_objc_v2_prefix_class)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = - symbol_name + g_objc_v2_prefix_class.size(); - type = eSymbolTypeObjCClass; - demangled_is_synthesized = true; - - } else if (symbol_name_ref.starts_with( - g_objc_v2_prefix_metaclass)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = - symbol_name + g_objc_v2_prefix_metaclass.size(); - type = eSymbolTypeObjCMetaClass; - demangled_is_synthesized = true; - } else if (symbol_name_ref.starts_with( - g_objc_v2_prefix_ivar)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = - symbol_name + g_objc_v2_prefix_ivar.size(); - type = eSymbolTypeObjCIVar; - demangled_is_synthesized = true; - } + llvm::StringRef symbol_name_ref(symbol_name); + if (symbol_name_ref.starts_with( + g_objc_v2_prefix_class)) { + symbol_name_non_abi_mangled = symbol_name + 1; + symbol_name = + symbol_name + g_objc_v2_prefix_class.size(); + type = eSymbolTypeObjCClass; + demangled_is_synthesized = true; + + } else if (symbol_name_ref.starts_with( + g_objc_v2_prefix_metaclass)) { + symbol_name_non_abi_mangled = symbol_name + 1; + symbol_name = + symbol_name + 
g_objc_v2_prefix_metaclass.size(); + type = eSymbolTypeObjCMetaClass; + demangled_is_synthesized = true; + } else if (symbol_name_ref.starts_with( + g_objc_v2_prefix_ivar)) { + symbol_name_non_abi_mangled = symbol_name + 1; + symbol_name = + symbol_name + g_objc_v2_prefix_ivar.size(); + type = eSymbolTypeObjCIVar; + demangled_is_synthesized = true; } else { if (nlist.n_value != 0) symbol_section = section_info.GetSection( @@ -3652,7 +3649,7 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { if (is_debug) { switch (nlist.n_type) { - case N_GSYM: + case N_GSYM: { // global symbol: name,,NO_SECT,type,0 // Sometimes the N_GSYM value contains the address. @@ -3668,33 +3665,30 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { is_gsym = true; sym[sym_idx].SetExternal(true); - if (symbol_name && symbol_name[0] == '_' && symbol_name[1] == 'O') { - llvm::StringRef symbol_name_ref(symbol_name); - if (symbol_name_ref.starts_with(g_objc_v2_prefix_class)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = symbol_name + g_objc_v2_prefix_class.size(); - type = eSymbolTypeObjCClass; - demangled_is_synthesized = true; - - } else if (symbol_name_ref.starts_with( - g_objc_v2_prefix_metaclass)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = symbol_name + g_objc_v2_prefix_metaclass.size(); - type = eSymbolTypeObjCMetaClass; - demangled_is_synthesized = true; - } else if (symbol_name_ref.starts_with(g_objc_v2_prefix_ivar)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = symbol_name + g_objc_v2_prefix_ivar.size(); - type = eSymbolTypeObjCIVar; - demangled_is_synthesized = true; - } + llvm::StringRef symbol_name_ref(symbol_name); + if (symbol_name_ref.starts_with(g_objc_v2_prefix_class)) { + symbol_name_non_abi_mangled = symbol_name + 1; + symbol_name = symbol_name + g_objc_v2_prefix_class.size(); + type = eSymbolTypeObjCClass; + demangled_is_synthesized = true; + + } else if 
(symbol_name_ref.starts_with(g_objc_v2_prefix_metaclass)) { + symbol_name_non_abi_mangled = symbol_name + 1; + symbol_name = symbol_name + g_objc_v2_prefix_metaclass.size(); + type = eSymbolTypeObjCMetaClass; + demangled_is_synthesized = true; + } else if (symbol_name_ref.starts_with(g_objc_v2_prefix_ivar)) { + symbol_name_non_abi_mangled = symbol_name + 1; + symbol_name = symbol_name + g_objc_v2_prefix_ivar.size(); + type = eSymbolTypeObjCIVar; + demangled_is_synthesized = true; } else { if (nlist.n_value != 0) symbol_section = section_info.GetSection(nlist.n_sect, nlist.n_value); type = eSymbolTypeData; } - break; + } break; case N_FNAME: // procedure name (f77 kludge): name,,NO_SECT,0,0 diff --git a/lldb/source/Target/Statistics.cpp b/lldb/source/Target/Statistics.cpp index 8ad8d50..f7311a8b 100644 --- a/lldb/source/Target/Statistics.cpp +++ b/lldb/source/Target/Statistics.cpp @@ -148,6 +148,11 @@ TargetStats::ToJSON(Target &target, target_metrics_json.try_emplace("targetCreateTime", m_create_time.get().count()); + if (m_load_core_time.get().count() > 0) { + target_metrics_json.try_emplace("loadCoreTime", + m_load_core_time.get().count()); + } + json::Array breakpoints_array; double totalBreakpointResolveTime = 0.0; // Report both the normal breakpoint list and the internal breakpoint list. 
diff --git a/lldb/test/API/functionalities/json/symbol-file/Makefile b/lldb/test/API/functionalities/json/symbol-file/Makefile index 13bc164..5d05d95f 100644 --- a/lldb/test/API/functionalities/json/symbol-file/Makefile +++ b/lldb/test/API/functionalities/json/symbol-file/Makefile @@ -1,4 +1,5 @@ C_SOURCES := main.c +CFLAGS_EXTRAS := -no-pie all: stripped.out diff --git a/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py b/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py index f06c9ae..d7249df 100644 --- a/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py +++ b/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py @@ -1,6 +1,7 @@ # Test the SBAPI for GetStatistics() import json + import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * @@ -54,6 +55,11 @@ class TestStatsAPI(TestBase): stats_json, 'Make sure the "frameVariable" key in in target.GetStatistics()["targets"][0]', ) + self.assertNotIn( + "loadCoreTime", + stats_json, + "LoadCoreTime should not be present in a live, non-coredump target", + ) expressionEvaluation = stats_json["expressionEvaluation"] self.assertIn( "successes", @@ -157,3 +163,25 @@ class TestStatsAPI(TestBase): stats_force.GetAsJSON(stream_force) debug_stats_force = json.loads(stream_force.GetData()) self.assertEqual(debug_stats_force["totalDebugInfoByteSize"], 445) + + def test_core_load_time(self): + """ + Test to see if the coredump path is included in statistics dump. 
+ """ + yaml_file = "arm64-minidump-build-ids.yaml" + src_dir = self.getSourceDir() + minidump_path = self.getBuildArtifact(os.path.basename(yaml_file) + ".dmp") + self.yaml2obj(os.path.join(src_dir, yaml_file), minidump_path) + target = self.dbg.CreateTarget(None) + process = target.LoadCore(minidump_path) + self.assertTrue(process.IsValid()) + + stats_options = lldb.SBStatisticsOptions() + stats = target.GetStatistics(stats_options) + stream = lldb.SBStream() + stats.GetAsJSON(stream) + debug_stats = json.loads(stream.GetData()) + self.assertTrue("targets" in debug_stats) + target_info = debug_stats["targets"][0] + self.assertTrue("loadCoreTime" in target_info) + self.assertTrue(float(target_info["loadCoreTime"]) > 0.0) diff --git a/lldb/test/API/functionalities/stats_api/arm64-minidump-build-ids.yaml b/lldb/test/API/functionalities/stats_api/arm64-minidump-build-ids.yaml new file mode 100644 index 0000000..4acbc40 --- /dev/null +++ b/lldb/test/API/functionalities/stats_api/arm64-minidump-build-ids.yaml @@ -0,0 +1,19 @@ +--- !minidump +Streams: + - Type: SystemInfo + Processor Arch: ARM + Platform ID: Linux + CSD Version: '15E216' + CPU: + CPUID: 0x00000000 + - Type: ModuleList + Modules: + - Base of Image: 0x0000000000001000 + Size of Image: 0x00001000 + Module Name: '/tmp/a' + CodeView Record: 4C4570420102030405060708090A0B0C0D0E0F1011121314 + - Base of Image: 0x0000000000001000 + Size of Image: 0x00001000 + Module Name: '/tmp/b' + CodeView Record: 4C4570420A141E28323C46505A646E78828C96A0AAB4BEC8 +... 
diff --git a/lldb/test/API/lang/cpp/floating-types-specialization/TestCppFloatingTypesSpecialization.py b/lldb/test/API/lang/cpp/floating-types-specialization/TestCppFloatingTypesSpecialization.py index 9564a0b..f4530cd 100644 --- a/lldb/test/API/lang/cpp/floating-types-specialization/TestCppFloatingTypesSpecialization.py +++ b/lldb/test/API/lang/cpp/floating-types-specialization/TestCppFloatingTypesSpecialization.py @@ -1,4 +1,5 @@ import lldb +import lldbsuite.test.lldbplatformutil as lldbplatformutil from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil @@ -11,12 +12,25 @@ class TestCase(TestBase): self, "// break here", lldb.SBFileSpec("main.cpp", False) ) - self.expect_expr("f0", result_type="Foo<__bf16>") - self.expect_expr("f1", result_type="Foo<__fp16>") + # On 32-bit Arm, you have to have the bfloat16 extension, or an FPU while + # not using the soft float mode. The target we assume has none of that + # so instead of __bf16 we get __fp16. + is_arm_32_bit = lldbplatformutil.getArchitecture() == "arm" + + self.expect_expr( + "f0", result_type=("Foo<__fp16>" if is_arm_32_bit else "Foo<__bf16>") + ) + + # When __bf16 is actually __fp16, f1 looks like it inherits from itself. + # Which clang allows but LLDB fails to evaluate. + if not is_arm_32_bit: + self.expect_expr("f1", result_type="Foo<__fp16>") # Test sizeof to ensure while computing layout we don't do # infinite recursion. 
v = self.frame().EvaluateExpression("sizeof(f0)") self.assertEqual(v.GetValueAsUnsigned() > 0, True) - v = self.frame().EvaluateExpression("sizeof(f1)") - self.assertEqual(v.GetValueAsUnsigned() > 0, True) + + if not is_arm_32_bit: + v = self.frame().EvaluateExpression("sizeof(f1)") + self.assertEqual(v.GetValueAsUnsigned() > 0, True) diff --git a/lldb/test/API/lang/cpp/template-arguments/TestCppTemplateArguments.py b/lldb/test/API/lang/cpp/template-arguments/TestCppTemplateArguments.py index f26d382..83c0572 100644 --- a/lldb/test/API/lang/cpp/template-arguments/TestCppTemplateArguments.py +++ b/lldb/test/API/lang/cpp/template-arguments/TestCppTemplateArguments.py @@ -1,4 +1,5 @@ import lldb +import lldbsuite.test.lldbplatformutil as lldbplatformutil from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil @@ -82,8 +83,12 @@ class TestCase(TestBase): value = self.expect_expr("temp7", result_type="Foo<__fp16, __fp16>") self.assertFalse(value.GetType().GetTemplateArgumentValue(target, 1)) - value = self.expect_expr("temp8", result_type="Foo<__bf16, __bf16>") - self.assertFalse(value.GetType().GetTemplateArgumentValue(target, 1)) + # The target we use when evaluating these expressions for Arm leads to there + # not being a __bf16 type in the AST so we fall back to __fp16 and evaluating + # this fails. 
+ if lldbplatformutil.getArchitecture() != "arm": + value = self.expect_expr("temp8", result_type="Foo<__bf16, __bf16>") + self.assertFalse(value.GetType().GetTemplateArgumentValue(target, 1)) value = self.expect_expr("temp9", result_type="Bar<double, 1.200000e+00>") template_param_value = value.GetType().GetTemplateArgumentValue(target, 1) diff --git a/lldb/test/Shell/Expr/TestGlobalSymbolObjCConflict.c b/lldb/test/Shell/Expr/TestGlobalSymbolObjCConflict.c new file mode 100644 index 0000000..62c0162 --- /dev/null +++ b/lldb/test/Shell/Expr/TestGlobalSymbolObjCConflict.c @@ -0,0 +1,33 @@ +// Tests that LLDB correctly parses global symbols +// starting with 'O'. On some platforms (e.g., Darwin) +// C-symbols are prefixed with a '_'. The LLDB Macho-O +// parses handles Objective-C metadata symbols starting +// with '_OBJC' specially. This test ensures that we don't +// lose track of regular global symbols with a '_O' prefix +// in this. + +// RUN: %clang_host -c -g -fno-common %s -o %t.o +// RUN: %clang_host %t.o -o %t.out +// RUN: %lldb -b -x %t.out \ +// RUN: -o "b 27" \ +// RUN: -o "run" \ +// RUN: -o "p OglobalVar" \ +// RUN: -o "p Oabc" | FileCheck %s + +typedef struct { + int a; +} Oabc_t; + +Oabc_t Oabc; +int OglobalVar; + +int main(int argc, const char *argv[]) { + Oabc.a = 15; + OglobalVar = 10; + return OglobalVar + Oabc.a; +} + +// CHECK: (lldb) p OglobalVar +// CHECK: (int) 10 +// CHECK: (lldb) p Oabc +// CHECK: (Oabc_t) (a = 15) diff --git a/lldb/test/Shell/lit.cfg.py b/lldb/test/Shell/lit.cfg.py index 505847f..cdc0cfe 100644 --- a/lldb/test/Shell/lit.cfg.py +++ b/lldb/test/Shell/lit.cfg.py @@ -33,7 +33,7 @@ config.test_format = toolchain.ShTestLldb(not use_lit_shell) # suffixes: A list of file extensions to treat as test files. This is overriden # by individual lit.local.cfg files in the test subdirectories. 
-config.suffixes = [".test", ".cpp", ".s", ".m", ".ll"] +config.suffixes = [".test", ".cpp", ".s", ".m", ".ll", ".c"] # excludes: A list of directories to exclude from the testsuite. The 'Inputs' # subdirectories contain auxiliary inputs for various tests in their parent diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt index b981929..c450ee5 100644 --- a/llvm/CMakeLists.txt +++ b/llvm/CMakeLists.txt @@ -1011,6 +1011,9 @@ set(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR ${LLVM_ENABLE_PER_TARGET_RUNTIME_DIR_defa set(LLVM_PROFDATA_FILE "" CACHE FILEPATH "Profiling data file to use when compiling in order to improve runtime performance.") +set(LLVM_SPROFDATA_FILE "" CACHE FILEPATH + "Sampling profiling data file to use when compiling in order to improve runtime performance.") + if(LLVM_INCLUDE_TESTS) # All LLVM Python files should be compatible down to this minimum version. set(LLVM_MINIMUM_PYTHON_VERSION 3.8) diff --git a/llvm/Maintainers.md b/llvm/Maintainers.md index 5afdd15..e522592 100644 --- a/llvm/Maintainers.md +++ b/llvm/Maintainers.md @@ -123,6 +123,13 @@ a.bataev@outlook.com (email), [alexey-bataev](https://github.com/alexey-bataev) Chandler Carruth \ chandlerc@gmail.com, chandlerc@google.com (email), [chandlerc](https://github.com/chandlerc) (GitHub) +#### DFAJumpThreading + +Hongyu Chen \ +xxs\_chy@outlook.com (email), [XChy](https://github.com/XChy) (Github) \ +Usman Nadeem \ +mnadeem@quicinc.com (email), [UsmanNadeem](https://github.com/UsmanNadeem) (Github) + ### Instrumentation and sanitizers #### Sanitizers not covered by someone else diff --git a/llvm/cmake/modules/HandleLLVMOptions.cmake b/llvm/cmake/modules/HandleLLVMOptions.cmake index 8eca29f..d4195db6 100644 --- a/llvm/cmake/modules/HandleLLVMOptions.cmake +++ b/llvm/cmake/modules/HandleLLVMOptions.cmake @@ -1184,7 +1184,7 @@ if(LLVM_ENABLE_EH AND NOT LLVM_ENABLE_RTTI) message(FATAL_ERROR "Exception handling requires RTTI. 
You must set LLVM_ENABLE_RTTI to ON") endif() -set(LLVM_BUILD_INSTRUMENTED OFF CACHE STRING "Build LLVM and tools with PGO instrumentation. May be specified as IR or Frontend") +set(LLVM_BUILD_INSTRUMENTED OFF CACHE STRING "Build LLVM and tools with PGO instrumentation. May be specified as IR, Frontend, CSIR, CSSPGO") set(LLVM_VP_COUNTERS_PER_SITE "1.5" CACHE STRING "Value profile counters to use per site for IR PGO with Clang") mark_as_advanced(LLVM_BUILD_INSTRUMENTED LLVM_VP_COUNTERS_PER_SITE) string(TOUPPER "${LLVM_BUILD_INSTRUMENTED}" uppercase_LLVM_BUILD_INSTRUMENTED) @@ -1217,6 +1217,19 @@ if (LLVM_BUILD_INSTRUMENTED) CMAKE_EXE_LINKER_FLAGS CMAKE_SHARED_LINKER_FLAGS) endif() + elseif(uppercase_LLVM_BUILD_INSTRUMENTED STREQUAL "CSSPGO") + if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") + append("-fno-omit-frame-pointer -mno-omit-leaf-frame-pointer -fno-optimize-sibling-calls -fpseudo-probe-for-profiling -fdebug-info-for-profiling" + CMAKE_CXX_FLAGS + CMAKE_C_FLAGS) + if(NOT LINKER_IS_LLD_LINK) + append("-fno-omit-frame-pointer -mno-omit-leaf-frame-pointer -fno-optimize-sibling-calls -fpseudo-probe-for-profiling -fdebug-info-for-profiling" + CMAKE_EXE_LINKER_FLAGS + CMAKE_SHARED_LINKER_FLAGS) + endif() + else() + message(FATAL_ERROR "LLVM_BUILD_INSTRUMENTED=CSSPGO can only be specified when compiling with clang") + endif() else() append("-fprofile-instr-generate=\"${LLVM_PROFILE_FILE_PATTERN}\"" CMAKE_CXX_FLAGS @@ -1269,6 +1282,21 @@ elseif(LLVM_PROFDATA_FILE) message(WARNING "LLVM_PROFDATA_FILE specified, but ${LLVM_PROFDATA_FILE} not found") endif() +if(LLVM_SPROFDATA_FILE AND EXISTS ${LLVM_SPROFDATA_FILE}) + if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" ) + append("-fpseudo-probe-for-profiling -fprofile-sample-use=\"${LLVM_SPROFDATA_FILE}\"" + CMAKE_CXX_FLAGS + CMAKE_C_FLAGS) + if(NOT LINKER_IS_LLD_LINK) + append("-fpseudo-probe-for-profiling -fprofile-sample-use=\"${LLVM_SPROFDATA_FILE}\"" + CMAKE_EXE_LINKER_FLAGS + CMAKE_SHARED_LINKER_FLAGS) + endif() + else() 
+ message(FATAL_ERROR "LLVM_SPROFDATA_FILE can only be specified when compiling with clang") + endif() +endif() + option(LLVM_BUILD_INSTRUMENTED_COVERAGE "Build LLVM and tools with Code Coverage instrumentation" Off) option(LLVM_INDIVIDUAL_TEST_COVERAGE "Emit individual coverage file for each test case." OFF) mark_as_advanced(LLVM_BUILD_INSTRUMENTED_COVERAGE) diff --git a/llvm/docs/AMDGPU/AMDGPUAsmGFX12.rst b/llvm/docs/AMDGPU/AMDGPUAsmGFX12.rst new file mode 100644 index 0000000..7259ee87 --- /dev/null +++ b/llvm/docs/AMDGPU/AMDGPUAsmGFX12.rst @@ -0,0 +1,2002 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +==================================================================================== +Syntax of GFX12 Instructions +==================================================================================== + +.. contents:: + :local: + +Introduction +============ + +This document describes the syntax of GFX12 instructions. + +Notation +======== + +Notation used in this document is explained :ref:`here<amdgpu_syn_instruction_notation>`. + +Overview +======== + +An overview of generic syntax and other features of AMDGPU instructions may be found :ref:`in this document<amdgpu_syn_instructions>`. + +Instructions +============ + + +SMEM +---- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **SRC3** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + s_atc_probe :ref:`sdata<amdgpu_synid_gfx12_sdata_d725ab>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_atc_probe_buffer :ref:`sdata<amdgpu_synid_gfx12_sdata_d725ab>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_buffer_load_b128 :ref:`sdata<amdgpu_synid_gfx12_sdata_4585b8>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_buffer_load_b256 :ref:`sdata<amdgpu_synid_gfx12_sdata_0974a4>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_buffer_load_b32 
:ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_buffer_load_b512 :ref:`sdata<amdgpu_synid_gfx12_sdata_6c003b>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_buffer_load_b64 :ref:`sdata<amdgpu_synid_gfx12_sdata_354189>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_buffer_load_b96 :ref:`sdata<amdgpu_synid_gfx12_sdata_dd9dd8>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_buffer_load_i16 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_buffer_load_i8 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_buffer_load_u16 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` 
:ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_buffer_load_u8 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_buffer_nop :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_buffer_prefetch_data :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`ioffset<amdgpu_synid_gfx12_ioffset>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>`, :ref:`sdata<amdgpu_synid_gfx12_sdata_5c7b50>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_dcache_inv :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_load_b128 :ref:`sdata<amdgpu_synid_gfx12_sdata_4585b8>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_load_b256 :ref:`sdata<amdgpu_synid_gfx12_sdata_0974a4>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_load_b32 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_load_b512 :ref:`sdata<amdgpu_synid_gfx12_sdata_6c003b>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, 
:ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_load_b64 :ref:`sdata<amdgpu_synid_gfx12_sdata_354189>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_load_b96 :ref:`sdata<amdgpu_synid_gfx12_sdata_dd9dd8>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_load_i16 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_load_i8 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_load_u16 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_load_u8 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_prefetch_data :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, 
:ref:`ioffset<amdgpu_synid_gfx12_ioffset>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>`, :ref:`sdata<amdgpu_synid_gfx12_sdata_5c7b50>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_prefetch_data_pc_rel :ref:`ioffset<amdgpu_synid_gfx12_ioffset>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>`, :ref:`sdata<amdgpu_synid_gfx12_sdata_5c7b50>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_prefetch_inst :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`ioffset<amdgpu_synid_gfx12_ioffset>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>`, :ref:`sdata<amdgpu_synid_gfx12_sdata_5c7b50>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + s_prefetch_inst_pc_rel :ref:`ioffset<amdgpu_synid_gfx12_ioffset>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>`, :ref:`sdata<amdgpu_synid_gfx12_sdata_5c7b50>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + +SOP1 +---- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + s_abs_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_alloc_vgpr :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_and_not0_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_and_not0_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_and_not0_wrexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_and_not0_wrexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_and_not1_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_and_not1_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_and_not1_wrexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_and_not1_wrexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_and_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_and_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_barrier_init :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>` + s_barrier_join :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>` + s_barrier_signal :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>` + s_barrier_signal_isfirst :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>` + 
s_bcnt0_i32_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_bcnt0_i32_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_bcnt1_i32_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_bcnt1_i32_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_bitreplicate_b64_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_bitset0_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_bitset0_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_bitset1_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_bitset1_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_brev_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_brev_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_ceil_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_ceil_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_cls_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_cls_i32_i64 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_clz_i32_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_clz_i32_u64 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_cmov_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_cmov_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, 
:ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_ctz_i32_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_ctz_i32_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_cvt_f16_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_cvt_f32_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_cvt_f32_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_cvt_f32_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_cvt_hi_f32_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_cvt_i32_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_cvt_u32_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_floor_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_floor_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_get_barrier_state :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>` + s_get_lock_state :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>` + s_getpc_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>` + s_mov_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_mov_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_mov_fed_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_mov_from_global_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_007f9c>` + s_mov_from_global_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, 
:ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_2797bc>` + s_mov_regrd_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_mov_to_global_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_mov_to_global_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_movreld_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_movreld_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_movrels_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_007f9c>` + s_movrels_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_2797bc>` + s_movrelsd_2_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_007f9c>` + s_nand_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_nand_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_nor_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_nor_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_not_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_not_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_or_not0_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_or_not0_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_or_not1_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_or_not1_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, 
:ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_or_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_or_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_quadmask_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_quadmask_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_rfe_b64 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_2797bc>` + s_rndne_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_rndne_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_sendmsg_rtn_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_245536>` + s_sendmsg_rtn_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_245536>` + s_setpc_b64 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_2797bc>` + s_sext_i32_i16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_sext_i32_i8 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_sleep_var :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_swap_to_global_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_007f9c>` + s_swappc_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_2797bc>` + s_trunc_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_trunc_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_try_lock :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>` + s_unlock :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>` + s_wakeup_barrier :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>` + s_wqm_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, 
:ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_wqm_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_xnor_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_xnor_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + s_xor_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>` + s_xor_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>` + +SOP2 +---- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + s_absdiff_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_add_co_ci_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_add_co_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_add_co_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_add_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_add_f32 
:ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_add_nc_u64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>` + s_and_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_and_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>` + s_and_not1_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_and_not1_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>` + s_ashr_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_ashr_i64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_bfe_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_bfe_i64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_bfe_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_bfe_u64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_bfm_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_bfm_b64 
:ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cselect_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cselect_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>` + s_cvt_pk_rtz_f16_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_fmaak_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + s_fmac_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_fmac_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_fmamk_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_lshl1_add_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_lshl2_add_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_lshl3_add_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_lshl4_add_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_lshl_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, 
:ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_lshl_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_lshr_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_lshr_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_max_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_max_num_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_max_num_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_max_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_maximum_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_maximum_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_min_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_min_num_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_min_num_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_min_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, 
:ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_minimum_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_minimum_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_mul_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_mul_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_mul_hi_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_mul_hi_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_mul_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_mul_u64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>` + s_nand_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_nand_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>` + s_nor_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_nor_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>` + s_or_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, 
:ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_or_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>` + s_or_not1_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_or_not1_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>` + s_pack_hh_b32_b16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_pack_hl_b32_b16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_pack_lh_b32_b16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_pack_ll_b32_b16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_sub_co_ci_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_sub_co_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_sub_co_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_sub_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_sub_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_sub_nc_u64 
:ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>` + s_xnor_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_xnor_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>` + s_xor_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_xor_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>` + +SOPC +---- + +.. parsed-literal:: + + **INSTRUCTION** **SRC0** **SRC1** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + s_bitcmp0_b32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_bitcmp0_b64 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_bitcmp1_b32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_bitcmp1_b64 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_eq_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_eq_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_eq_i32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_eq_u32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, 
:ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_eq_u64 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>` + s_cmp_ge_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_ge_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_ge_i32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_ge_u32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_gt_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_gt_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_gt_i32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_gt_u32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_le_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_le_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_le_i32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_le_u32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_lg_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_lg_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_lg_i32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_lg_u32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_lg_u64 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>` + 
s_cmp_lt_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_lt_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_lt_i32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_lt_u32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_neq_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_neq_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_nge_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_nge_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_ngt_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_ngt_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_nle_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_nle_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_nlg_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_nlg_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_nlt_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_nlt_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_o_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_o_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_u_f16 
:ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + s_cmp_u_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>` + +SOPK +---- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + s_addk_co_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_call_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>` + s_cmovk_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_cmpk_eq_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_cmpk_eq_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_cmpk_ge_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_cmpk_ge_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_cmpk_gt_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_cmpk_gt_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_cmpk_le_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_cmpk_le_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_cmpk_lg_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_cmpk_lg_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, 
:ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_cmpk_lt_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_cmpk_lt_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_getreg_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_7ed651>` + s_getreg_regrd_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_7ed651>` + s_movk_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_mulk_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_setreg_b32 :ref:`simm16<amdgpu_synid_gfx12_simm16_cc1716>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_20064d>` + s_setreg_imm32_b32 :ref:`simm16<amdgpu_synid_gfx12_simm16_cc1716>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + s_subvector_loop_begin :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>` + s_subvector_loop_end :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>` + s_version :ref:`simm16<amdgpu_synid_gfx12_simm16_15ccdd>` + +SOPP +---- + +.. 
parsed-literal:: + + **INSTRUCTION** **SRC** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + s_barrier + s_barrier_leave + s_barrier_wait :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_branch :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>` + s_cbranch_cdbgsys :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>` + s_cbranch_cdbgsys_and_user :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>` + s_cbranch_cdbgsys_or_user :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>` + s_cbranch_cdbguser :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>` + s_cbranch_execnz :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>` + s_cbranch_execz :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>` + s_cbranch_scc0 :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>` + s_cbranch_scc1 :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>` + s_cbranch_vccnz :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>` + s_cbranch_vccz :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>` + s_clause :ref:`simm16<amdgpu_synid_gfx12_simm16_730a13>` + s_code_end + s_decperflevel :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_delay_alu :ref:`simm16<amdgpu_synid_gfx12_simm16_c98889>` + s_denorm_mode :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_endpgm + s_endpgm_ordered_ps_done + s_endpgm_saved + s_icache_inv + s_incperflevel :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_nop :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_round_mode :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_sendmsg :ref:`simm16<amdgpu_synid_gfx12_simm16_ee8b30>` + s_sendmsghalt :ref:`simm16<amdgpu_synid_gfx12_simm16_ee8b30>` + s_set_inst_prefetch_distance :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_sethalt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_setkill 
:ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_setprio :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_singleuse_vdst :ref:`simm16<amdgpu_synid_gfx12_simm16_81e671>` + s_sleep :ref:`simm16<amdgpu_synid_gfx12_simm16_81e671>` + s_trap :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_ttracedata + s_ttracedata_imm :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_wait_alu :ref:`simm16<amdgpu_synid_gfx12_simm16_81e671>` + s_wait_bvhcnt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_wait_dscnt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_wait_event :ref:`simm16<amdgpu_synid_gfx12_simm16_81e671>` + s_wait_expcnt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_wait_idle + s_wait_kmcnt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_wait_loadcnt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_wait_loadcnt_dscnt :ref:`simm16<amdgpu_synid_gfx12_simm16_81e671>` + s_wait_samplecnt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_wait_storecnt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>` + s_wait_storecnt_dscnt :ref:`simm16<amdgpu_synid_gfx12_simm16_81e671>` + s_waitcnt :ref:`simm16<amdgpu_synid_gfx12_simm16_218bea>` + s_wakeup + +VBUFFER +------- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + buffer_atomic_add_f32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_add_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_add_u64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` 
:ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_and_b32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_and_b64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_cmpswap_b32 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_cmpswap_b64 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_cond_sub_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, 
:ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_dec_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_dec_u64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_inc_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_inc_u64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` 
:ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_max_i32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_max_i64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_max_num_f32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_max_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_max_u64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, 
:ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_min_i32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_min_i64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_min_num_f32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_min_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` 
:ref:`nv<amdgpu_synid_nv>` + buffer_atomic_min_u64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_or_b32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_or_b64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_pk_add_bf16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_pk_add_f16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` 
:ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_sub_clamp_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_sub_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_sub_u64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_swap_b32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_swap_b64 
:ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_xor_b32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_atomic_xor_b64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_gl0_inv :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_gl1_inv :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_b128 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` 
:ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_b32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_b64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_b96 :ref:`vdata<amdgpu_synid_gfx12_vdata_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_block :ref:`vdata<amdgpu_synid_gfx12_vdata_2eda77>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_d16_b16 
:ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_d16_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_d16_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_d16_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_d16_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` 
:ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_d16_hi_b16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_d16_hi_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_d16_hi_i8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_d16_hi_u8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_d16_i8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, 
:ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_d16_u8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` 
:ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_i16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_i8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_lds_b32 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_lds_format_x :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` 
:ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_lds_i16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_lds_i8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_lds_u16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_lds_u8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_u16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` 
:ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_load_u8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_nop :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_b128 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_b16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_b32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` 
:ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_b64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_b8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_b96 :ref:`vdata<amdgpu_synid_gfx12_vdata_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_block :ref:`vdata<amdgpu_synid_gfx12_vdata_2eda77>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_d16_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, 
:ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_d16_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_d16_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_d16_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_d16_hi_b16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` 
:ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_d16_hi_b8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_d16_hi_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_48e42f>`, 
:ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + buffer_store_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_load_d16_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_load_d16_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_load_d16_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` 
:ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_load_d16_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_load_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_load_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_load_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_load_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, 
:ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_store_d16_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_store_d16_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_store_d16_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_store_d16_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` 
:ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_store_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_store_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_store_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + tbuffer_store_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + +VDS +--- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + ds_add_f32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_add_f64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_add_rtn_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_add_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_add_rtn_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` 
:ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_add_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_add_u64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_and_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_and_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_and_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_and_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_append :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_bpermute_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_bpermute_fi_b32 
:ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_bpermute_fi_from_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_bpermute_fi_to_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_bpermute_fi_to_simd_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_bpermute_from_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_bpermute_to_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_bpermute_to_simd_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_bvh_stack_push4_pop1_rtn_b32 
:ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_e016a1>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_bvh_stack_push8_pop1_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_731030>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_bvh_stack_push8_pop2_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_731030>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_cmpstore_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_cmpstore_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_cmpstore_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_cmpstore_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, 
:ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_cond_sub_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_cond_sub_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_condxchg32_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_consume :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_dec_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_dec_rtn_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_dec_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_dec_u64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` 
:ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_inc_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_inc_rtn_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_inc_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_inc_u64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_2addr_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_2addr_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_2addr_stride64_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_2addr_stride64_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` 
:ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_addtid_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_b128 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_b96 :ref:`vdst<amdgpu_synid_gfx12_vdst_48e42f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_i8_d16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_i8_d16_hi :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_u16 
:ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_u16_d16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_u16_d16_hi :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_u8_d16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_load_u8_d16_hi :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_max_i32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_max_i64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_max_num_f32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_max_num_f64 
:ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_max_num_rtn_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_max_num_rtn_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_max_rtn_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_max_rtn_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_max_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_max_rtn_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_max_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` 
:ref:`offset1<amdgpu_synid_ds_offset81>` + ds_max_u64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_min_i32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_min_i64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_min_num_f32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_min_num_f64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_min_num_rtn_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_min_num_rtn_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_min_rtn_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_min_rtn_i64 
:ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_min_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_min_rtn_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_min_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_min_u64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_mskor_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_mskor_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_mskor_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` 
:ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_mskor_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_nop :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_or_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_or_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_or_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_or_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_permute_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_permute_from_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` 
:ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_permute_to_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_permute_to_simd_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_pk_add_bf16 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_pk_add_f16 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_pk_add_rtn_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_pk_add_rtn_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_rsub_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_rsub_rtn_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, 
:ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_rsub_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_rsub_u64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_store_2addr_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_store_2addr_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_store_2addr_stride64_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_store_2addr_stride64_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_store_addtid_b32 :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_store_b128 :ref:`addr<amdgpu_synid_gfx12_addr>`, 
:ref:`data0<amdgpu_synid_gfx12_data0_e016a1>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_store_b16 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_store_b16_d16_hi :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_store_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_store_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_store_b8 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_store_b8_d16_hi :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_store_b96 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_56f215>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_storexchg_2addr_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` 
:ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_storexchg_2addr_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_storexchg_2addr_stride64_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_storexchg_2addr_stride64_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_storexchg_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_storexchg_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_sub_clamp_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_sub_clamp_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, 
:ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_sub_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_sub_rtn_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_sub_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_sub_u64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_swizzle_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_wrap_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_xor_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_xor_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` 
:ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_xor_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + ds_xor_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>` + +VDSDIR +------ + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + ds_direct_load :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>` :ref:`wait_va_vdst<amdgpu_synid_wait_va_vdst>` :ref:`wait_vdst<amdgpu_synid_wait_vdst>` :ref:`wait_vm_vsrc<amdgpu_synid_wait_vm_vsrc>` + ds_param_load :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`attr<amdgpu_synid_gfx12_attr>` :ref:`wait_va_vdst<amdgpu_synid_wait_va_vdst>` :ref:`wait_vdst<amdgpu_synid_wait_vdst>` :ref:`wait_vm_vsrc<amdgpu_synid_wait_vm_vsrc>` + +VERIF +----- + +.. 
parsed-literal:: + + **INSTRUCTION** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + fake_s_delay_alu + fake_s_nop + fake_s_wait_alu + fake_s_wait_bvhcnt + fake_s_wait_dscnt + fake_s_wait_expcnt + fake_s_wait_kmcnt + fake_s_wait_loadcnt + fake_s_wait_samplecnt + fake_s_wait_storecnt + fake_s_waitcnt + fake_v_nop + ill_0 + ill_1 + ill_beef + metadata + verif_s_adjdelay_alu + +VEXPORT +------- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **SRC3** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + export :ref:`tgt<amdgpu_synid_gfx12_tgt>`, :ref:`vsrc0<amdgpu_synid_gfx12_vsrc0>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`, :ref:`vsrc2<amdgpu_synid_gfx12_vsrc2>`, :ref:`vsrc3<amdgpu_synid_gfx12_vsrc3>` :ref:`done<amdgpu_synid_done>` :ref:`row_en<amdgpu_synid_row_en>` + +VFLAT +----- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + flat_atomic_add_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_add_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_add_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_and_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_and_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` 
:ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_cmpswap_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_cmpswap_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_e016a1>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_cond_sub_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_dec_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_dec_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_inc_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_inc_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, 
:ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_max_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_max_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_max_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_max_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_max_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_min_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_min_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, 
:ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_min_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_min_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_min_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_or_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_or_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_pk_add_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_pk_add_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, 
:ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_sub_clamp_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_sub_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_sub_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_swap_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_swap_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_xor_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_atomic_xor_b64 
:ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_load_b128 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_load_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_load_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_load_b96 :ref:`vdst<amdgpu_synid_gfx12_vdst_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_load_d16_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_load_d16_hi_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_load_d16_hi_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_load_d16_hi_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, 
:ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_load_d16_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_load_d16_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_load_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_load_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_load_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_load_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_store_b128 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_e016a1>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_store_b16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` 
:ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_store_b32 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_store_b64 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_store_b8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_store_b96 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_56f215>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_store_d16_hi_b16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + flat_store_d16_hi_b8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + +VGLOBAL +------- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + global_atomic_add_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_add_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_add_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_and_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` 
:ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_and_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_cmpswap_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_cmpswap_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_e016a1>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_cond_sub_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_dec_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_dec_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, 
:ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_inc_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_inc_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_max_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_max_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_max_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_max_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, 
:ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_max_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_min_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_min_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_min_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_min_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_min_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, 
:ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_or_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_or_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_ordered_add_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_pk_add_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_pk_add_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_sub_clamp_u32 
:ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_sub_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_sub_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_swap_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_swap_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_atomic_xor_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + 
global_atomic_xor_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_inv :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_addtid_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_b128 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_b96 :ref:`vdst<amdgpu_synid_gfx12_vdst_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_block :ref:`vdst<amdgpu_synid_gfx12_vdst_2eda77>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, 
:ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_d16_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_d16_hi_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_d16_hi_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_d16_hi_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_d16_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_d16_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, 
:ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_lds_addtid_b32 :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_lds_b32 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_lds_i16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_lds_i8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_lds_u16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_lds_u8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_tr_b128 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, 
:ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_tr_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_load_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_store_addtid_b32 :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_store_b128 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_e016a1>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_store_b16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_store_b32 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, 
:ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_store_b64 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_store_b8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_store_b96 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_56f215>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_store_block :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_89fd7b>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_store_d16_hi_b16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_store_d16_hi_b8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_wb :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` 
:ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + global_wbinv :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + +VIMAGE +------ + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + image_atomic_add_flt :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_add_uint :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_and :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, 
:ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_cmpswap :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_dec_uint :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_inc_uint :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_max_flt :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` 
:ref:`nv<amdgpu_synid_nv>` + image_atomic_max_int :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_max_uint :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_min_flt :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_min_int :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_min_uint :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` 
:ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_or :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_pk_add_bf16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_pk_add_f16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_sub_uint :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_swap :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, 
:ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_atomic_xor :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_bvh64_intersect_ray :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c12f43>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_bvh8_intersect_ray :ref:`vdata<amdgpu_synid_gfx12_vdata_aac3e8>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_a972b9>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_bvh_dual_intersect_ray :ref:`vdata<amdgpu_synid_gfx12_vdata_aac3e8>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c12f43>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` 
:ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_bvh_intersect_ray :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_a972b9>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_get_resinfo :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_load :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_load_mip :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_load_mip_pck :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` 
:ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_load_mip_pck_sgn :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_load_pck :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_load_pck_sgn :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_rsvd_atomic_umax_8 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_rsvd_atomic_umin_8 
:ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_store :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_store_mip :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_store_mip_pck :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_store_pck :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` 
:ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + +VINTERP +------- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_interp_p10_f16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`wait_exp<amdgpu_synid_wait_exp>` + v_interp_p10_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`wait_exp<amdgpu_synid_wait_exp>` + v_interp_p10_rtz_f16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`wait_exp<amdgpu_synid_wait_exp>` + v_interp_p2_f16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`wait_exp<amdgpu_synid_wait_exp>` + v_interp_p2_f32 
:ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`wait_exp<amdgpu_synid_wait_exp>` + v_interp_p2_rtz_f16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`wait_exp<amdgpu_synid_wait_exp>` + +VOP1 +---- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_bfrev_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_ceil_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_ceil_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_ceil_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cls_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_clz_i32_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cos_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cos_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_ctz_i32_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f16_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f16_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f32_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f32_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f32_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f32_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f32_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f32_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f32_ubyte0 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f32_ubyte1 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f32_ubyte2 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f32_ubyte3 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f64_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f64_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_f64_u32 
:ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_floor_i32_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_i16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_i32_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_i32_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_i32_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_nearest_i32_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_norm_i16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_norm_u16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_off_f32_i4 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` 
:ref:`clamp<amdgpu_synid_clamp>` + v_cvt_pk_f32_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_pk_f32_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_u16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_u32_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_u32_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_u32_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_exp_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_exp_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_floor_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_floor_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` 
:ref:`clamp<amdgpu_synid_clamp>` + v_floor_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fract_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fract_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fract_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_frexp_exp_i16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_frexp_exp_i32_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_frexp_exp_i32_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_frexp_mant_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_frexp_mant_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_frexp_mant_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` 
:ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_log_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_log_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mov_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mov_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mov_fed_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mov_from_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mov_to_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_movreld_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_movrels_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_movrelsd_2_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_movrelsd_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_nop :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_not_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_not_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_permlane64_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_pipeflush :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_rcp_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_rcp_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_rcp_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_rcp_iflag_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_readfirstlane_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_rndne_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_rndne_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_rndne_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_rsq_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_rsq_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_rsq_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sat_pk_u8_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sin_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sin_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sqrt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sqrt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sqrt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_swap_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_swap_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_swaprel_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_trunc_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_trunc_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_trunc_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_writelane_regwr_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + +VOP2 +---- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST0** **DST1** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_add_co_ci_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_add_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_add_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_add_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + 
v_add_nc_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_add_nc_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_and_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_ashrrev_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cndmask_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_pk_rtz_f16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fmaak_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, 
:ref:`literal<amdgpu_synid_gfx12_literal_81e671>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fmaak_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fmaak_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`literal<amdgpu_synid_gfx12_literal_1f74c7>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fmac_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fmac_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fmac_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fmamk_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` 
:ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fmamk_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fmamk_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`literal<amdgpu_synid_gfx12_literal_1f74c7>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_illegal :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_ldexp_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_lshlrev_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_lshlrev_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_lshrrev_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_max_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_max_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_max_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_max_num_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_max_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_min_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_min_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_min_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_min_num_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_min_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mul_dx9_zero_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mul_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mul_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mul_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mul_hi_i32_i24 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mul_hi_u32_u24 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mul_i32_i24 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mul_u32_u24 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mul_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_or_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_pk_fmac_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sub_co_ci_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sub_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sub_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sub_nc_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sub_nc_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_subrev_co_ci_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, 
:ref:`vcc<amdgpu_synid_gfx12_vcc>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_subrev_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_subrev_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_subrev_nc_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_xnor_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_xor_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + +VOP3 +---- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST0** **DST1** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_add3_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_add_co_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_e701cc>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_add_lshl_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_add_nc_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, 
:ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_add_nc_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_add_nc_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_alignbit_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_alignbyte_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_and_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_and_or_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` 
:ref:`clamp<amdgpu_synid_clamp>` + v_ashrrev_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_ashrrev_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_bcnt_u32_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_bfe_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_bfe_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_bfi_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_bfm_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cndmask_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_2797bc>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cubeid_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cubema_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cubesc_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cubetc_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_pk_bf8_f32 
:ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_pk_fp8_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_pk_i16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_pk_i16_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_pk_norm_i16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_pk_norm_i16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_pk_norm_u16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_pk_norm_u16_f32 
:ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_pk_u16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_pk_u16_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_pk_u8_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_sr_bf8_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cvt_sr_fp8_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_div_fixup_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, 
:ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_div_fixup_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_div_fixup_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_div_fmas_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_div_fmas_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_div_scale_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_e701cc>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_div_scale_f64 
:ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_e701cc>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_dot2_bf16_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_dot2_f16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fma_dx9_zero_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fma_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fma_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, 
:ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_fma_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_ldexp_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_ldexp_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_lerp_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_lshl_add_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_lshl_add_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, 
:ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_lshl_or_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_lshlrev_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_lshrrev_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_lshrrev_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mad_co_i64_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_e701cc>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mad_co_u64_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_e701cc>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, 
:ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mad_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mad_i32_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mad_i32_i24 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mad_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mad_u32_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mad_u32_u24 
:ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_max3_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_max3_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_max3_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_max3_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_max3_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, 
:ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_max3_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_max_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_max_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_maximum3_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_maximum3_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_maximum_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` 
:ref:`clamp<amdgpu_synid_clamp>` + v_maximum_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_maximum_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_maximumminimum_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_maximumminimum_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_maxmin_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_maxmin_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` 
:ref:`clamp<amdgpu_synid_clamp>` + v_maxmin_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_maxmin_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mbcnt_hi_u32_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mbcnt_lo_u32_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_med3_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_med3_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` 
+ v_med3_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_med3_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_med3_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_med3_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_min3_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_min3_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, 
:ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_min3_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_min3_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_min3_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_min3_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_min_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_min_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, 
:ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_minimum3_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_minimum3_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_minimum_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_minimum_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_minimum_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_minimummaximum_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` 
:ref:`clamp<amdgpu_synid_clamp>` + v_minimummaximum_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_minmax_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_minmax_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_minmax_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_minmax_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mqsad_pk_u16_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, 
:ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mqsad_u32_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_e016a1>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_msad_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mul_hi_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mul_hi_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mul_lo_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_mul_lo_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` 
:ref:`clamp<amdgpu_synid_clamp>` + v_mullit_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_or3_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_or_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_pack_b32_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_perm_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_permlane16_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + 
v_permlane16_var_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_permlanex16_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_permlanex16_var_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_qsad_pk_u16_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_readlane_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_977794>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_readlane_regrd_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_977794>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_s_exp_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_85aab6>`::ref:`m<amdgpu_synid_gfx12_m>` 
:ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_s_exp_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_s_log_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_85aab6>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_s_log_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_s_rcp_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_85aab6>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_s_rcp_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_s_rsq_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_85aab6>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_s_rsq_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_s_sqrt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_85aab6>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_s_sqrt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sad_hi_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, 
:ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sad_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sad_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sad_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sub_co_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_e701cc>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sub_nc_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sub_nc_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, 
:ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_sub_nc_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_subrev_co_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_e701cc>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_trig_preop_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_writelane_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_977794>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_xad_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_xor3_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + 
v_xor_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + +VOP3P +----- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_dot2_f32_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_dot2_f32_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_dot4_f32_bf8_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_dot4_f32_bf8_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_dot4_f32_fp8_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_dot4_f32_fp8_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, 
:ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_dot4_i32_iu8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_dot4_u32_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_dot8_i32_iu4 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_dot8_u32_u4 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_fma_mix_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_fma_mixhi_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_fma_mixlo_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_pk_add_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_add_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_add_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_add_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_ashrrev_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_fma_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_pk_fma_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>` + v_pk_lshlrev_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_lshrrev_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_mad_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_pk_mad_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>` + v_pk_max_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_max_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_max_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_maximum_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_min_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_min_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_min_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_minimum_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_mul_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_mul_lo_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_sub_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_pk_sub_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>` + v_swmmac_bf16_16x16x32_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_731030>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` + v_swmmac_f16_16x16x32_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_731030>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` + v_swmmac_f32_16x16x32_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_731030>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` + v_swmmac_f32_16x16x32_bf8_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` + 
v_swmmac_f32_16x16x32_bf8_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` + v_swmmac_f32_16x16x32_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_731030>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` + v_swmmac_f32_16x16x32_fp8_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` + v_swmmac_f32_16x16x32_fp8_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` + v_swmmac_i32_16x16x32_iu4 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_fd235e>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` + v_swmmac_i32_16x16x32_iu8 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` + v_swmmac_i32_16x16x64_iu4 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` + v_wmma_bf16_16x16x16_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_7b936a>` + v_wmma_f16_16x16x16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_7b936a>` + v_wmma_f32_16x16x16_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, 
:ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>` + v_wmma_f32_16x16x16_bf8_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_fd235e>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>` + v_wmma_f32_16x16x16_bf8_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_fd235e>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>` + v_wmma_f32_16x16x16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>` + v_wmma_f32_16x16x16_fp8_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_fd235e>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>` + v_wmma_f32_16x16x16_fp8_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_fd235e>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>` + v_wmma_i32_16x16x16_iu4 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>` + v_wmma_i32_16x16x16_iu8 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_fd235e>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>` + v_wmma_i32_16x16x32_iu4 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_fd235e>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>` + +VOPC +---- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_cmp_class_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_class_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_class_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_eq_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_eq_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` 
:ref:`clamp<amdgpu_synid_clamp>` + v_cmp_eq_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_eq_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_eq_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_eq_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_eq_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_eq_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_eq_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_f_f16 
:ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_f_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_f_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_f_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_f_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_f_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_f_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ge_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ge_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ge_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ge_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ge_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ge_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ge_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ge_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ge_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_gt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_gt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_gt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_gt_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_gt_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_gt_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_gt_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_gt_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_gt_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_le_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_le_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_le_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_le_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_le_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_le_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_le_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_le_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_le_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_lg_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_lg_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_lg_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_lt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_lt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_lt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_lt_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_lt_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_lt_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_lt_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_lt_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_lt_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ne_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ne_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ne_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ne_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ne_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ne_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_neq_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_neq_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_neq_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_nge_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_nge_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_nge_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ngt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ngt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_ngt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_nle_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_nle_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_nle_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_nlg_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_nlg_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_nlg_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_nlt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_nlt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_nlt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_o_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_o_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_o_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_t_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_t_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_t_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_t_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_t_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_t_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_t_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_u_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_u_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmp_u_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_class_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_class_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_class_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_eq_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_eq_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_eq_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_eq_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_eq_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_eq_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_eq_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_eq_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_eq_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_f_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_f_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_f_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_f_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_f_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_f_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_f_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ge_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ge_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ge_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ge_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ge_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ge_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ge_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ge_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ge_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_gt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_gt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_gt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_gt_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_gt_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_gt_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_gt_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_gt_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_gt_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_le_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_le_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_le_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_le_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_le_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_le_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_le_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_le_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_le_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_lg_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_lg_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_lg_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_lt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_lt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_lt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_lt_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_lt_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_lt_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_lt_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_lt_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_lt_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ne_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ne_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ne_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ne_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ne_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ne_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_neq_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_neq_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_neq_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_nge_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_nge_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_nge_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ngt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ngt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_ngt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_nle_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_nle_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_nle_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_nlg_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_nlg_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_nlg_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_nlt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_nlt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_nlt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_o_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_o_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_o_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_t_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_t_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_t_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_t_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_t_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_t_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_t_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_u_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, 
:ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_u_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + v_cmpx_u_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>` + +VOPD +---- + +.. parsed-literal:: + + **INSTRUCTION** **DST0** **DST1** **SRC0** **SRC1** **SRC2** **SRC3** **SRC4** **SRC5** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_dual_add_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_add_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, 
:ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_add_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_add_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_add_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_add_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_add_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_add_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_add_f32_x_fmamk_f32 
:ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_add_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_add_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_add_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_add_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>` + v_dual_add_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_add_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_add_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, 
:ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_add_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_cndmask_b32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_cndmask_b32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_cndmask_b32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_cndmask_b32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_cndmask_b32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, 
:ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_cndmask_b32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_cndmask_b32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_cndmask_b32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_cndmask_b32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_cndmask_b32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_cndmask_b32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, 
:ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_cndmask_b32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_cndmask_b32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_cndmask_b32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_cndmask_b32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_cndmask_b32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_cndmask_b32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_dot2acc_f32_bf16_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, 
:ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_bf16_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_bf16_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_bf16_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_dot2acc_f32_bf16_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_bf16_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_bf16_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_dot2acc_f32_bf16_x_fmac_f32 
:ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_bf16_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_dot2acc_f32_bf16_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_bf16_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_bf16_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_bf16_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>` + v_dual_dot2acc_f32_bf16_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_bf16_x_mul_f32 
:ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_bf16_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_bf16_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_f16_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_f16_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_f16_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_f16_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + 
v_dual_dot2acc_f32_f16_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_f16_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_f16_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_dot2acc_f32_f16_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_f16_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_dot2acc_f32_f16_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_f16_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, 
:ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_f16_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_f16_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>` + v_dual_dot2acc_f32_f16_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_f16_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_f16_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_dot2acc_f32_f16_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_fmaak_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, 
:ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, 
:ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_mul_dx9_zero_f32 
:ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmaak_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmac_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_fmac_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_fmac_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, 
:ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_fmac_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_fmac_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_fmac_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_fmac_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmac_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_fmac_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmac_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, 
:ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_fmac_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_fmac_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_fmac_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>` + v_dual_fmac_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_fmac_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_fmac_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_fmac_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, 
:ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_fmamk_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, 
:ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, 
:ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmamk_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_max_num_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_max_num_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, 
:ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_max_num_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_max_num_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_max_num_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_max_num_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_max_num_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_max_num_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_max_num_f32_x_fmamk_f32 
:ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_max_num_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_max_num_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_max_num_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_max_num_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>` + v_dual_max_num_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_max_num_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_max_num_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, 
:ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_max_num_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_min_num_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_min_num_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_min_num_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_min_num_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_min_num_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_min_num_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, 
:ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_min_num_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_min_num_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_min_num_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_min_num_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_min_num_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_min_num_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_min_num_f32_x_mov_b32 
:ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>` + v_dual_min_num_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_min_num_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_min_num_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_min_num_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mov_b32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mov_b32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mov_b32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + 
v_dual_mov_b32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_mov_b32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mov_b32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mov_b32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_mov_b32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mov_b32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_mov_b32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mov_b32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + 
v_dual_mov_b32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mov_b32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>` + v_dual_mov_b32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mov_b32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mov_b32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mov_b32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_dx9_zero_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_dx9_zero_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_dx9_zero_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, 
:ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_dx9_zero_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_mul_dx9_zero_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_dx9_zero_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_dx9_zero_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_mul_dx9_zero_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_dx9_zero_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, 
:ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_mul_dx9_zero_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_dx9_zero_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_dx9_zero_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_dx9_zero_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>` + v_dual_mul_dx9_zero_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_dx9_zero_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_dx9_zero_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + 
v_dual_mul_dx9_zero_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_mul_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_f32_x_fmaak_f32 
:ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_mul_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_mul_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>` + v_dual_mul_f32_x_mul_dx9_zero_f32 
:ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_mul_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_sub_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_sub_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_sub_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_sub_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, 
:ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_sub_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_sub_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_sub_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_sub_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_sub_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_sub_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_sub_f32_x_max_num_f32 
:ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_sub_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_sub_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>` + v_dual_sub_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_sub_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_sub_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_sub_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_subrev_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, 
:ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_subrev_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_subrev_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_subrev_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_subrev_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_subrev_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_subrev_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_subrev_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, 
:ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_subrev_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_subrev_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_subrev_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_subrev_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_subrev_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>` + v_dual_subrev_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_subrev_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, 
:ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_subrev_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_subrev_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + +VOPDX +----- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_dual_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>` + v_dual_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>` + v_dual_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>` + v_dual_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>` + v_dual_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, 
:ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>` + v_dual_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>` + v_dual_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>` + v_dual_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>` + v_dual_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>` + v_dual_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>` + v_dual_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>` + v_dual_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>` + v_dual_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>` + v_dual_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>` + +VOPDY +----- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_dual_add_nc_u32 :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_and_b32 :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + v_dual_lshlrev_b32 :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>` + +VSAMPLE +------- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + image_gather4 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` 
:ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4_b :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4_b_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4_c :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4_c_b :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, 
:ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4_c_b_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4_c_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4_c_l :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4_c_lz 
:ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4_c_lz_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4_l :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` 
:ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4_lz :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4_lz_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_gather4h :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` 
:ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_get_lod :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_msaa_load :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_b :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` 
:ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_b_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_b_cl_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_b_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, 
:ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_b :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_b_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_b_cl_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_b_o 
:ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_cl_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_d :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` 
:ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_d_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_d_cl_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_d_cl_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_d_cl_o_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` 
:ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_d_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_d_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_d_o_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_l :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, 
:ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_l_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_lz :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_lz_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_c_o 
:ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_cl_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_d :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` 
:ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_d_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_d_cl_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_d_cl_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_d_cl_o_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` 
:ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_d_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_d_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_d_o_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_l :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` 
:ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_l_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_lz :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_lz_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + image_sample_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, 
:ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + +VSCRATCH +-------- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + scratch_load_b128 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` 
:ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_b96 :ref:`vdst<amdgpu_synid_gfx12_vdst_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_block :ref:`vdst<amdgpu_synid_gfx12_vdst_2eda77>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_d16_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_d16_hi_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_d16_hi_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_d16_hi_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_d16_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` 
:ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_d16_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_lds_b32 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_lds_i16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_lds_i8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_lds_u16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_lds_u8 
:ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_load_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_store_b128 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_e016a1>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_store_b16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_store_b32 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_store_b64 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_store_b8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, 
:ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_store_b96 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_56f215>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_store_block :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_89fd7b>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_store_d16_hi_b16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + scratch_store_d16_hi_b8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>` + +.. |---| unicode:: U+02014 .. em dash + +.. 
toctree:: + :hidden: + + gfx12_addr + gfx12_attr + gfx12_data0_56f215 + gfx12_data0_6802ce + gfx12_data0_e016a1 + gfx12_data0_fd235e + gfx12_data1_6802ce + gfx12_data1_731030 + gfx12_data1_e016a1 + gfx12_data1_fd235e + gfx12_ioffset + gfx12_literal_1f74c7 + gfx12_literal_81e671 + gfx12_m + gfx12_rsrc_5fe6d8 + gfx12_rsrc_c9f929 + gfx12_saddr_cdc95c + gfx12_saddr_d42b64 + gfx12_samp + gfx12_sbase_453b95 + gfx12_sbase_47adb7 + gfx12_sdata_0974a4 + gfx12_sdata_354189 + gfx12_sdata_4585b8 + gfx12_sdata_5c7b50 + gfx12_sdata_6c003b + gfx12_sdata_836716 + gfx12_sdata_d725ab + gfx12_sdata_dd9dd8 + gfx12_sdst_006c40 + gfx12_sdst_20064d + gfx12_sdst_354189 + gfx12_sdst_836716 + gfx12_sdst_ced58d + gfx12_sdst_e701cc + gfx12_simm16_15ccdd + gfx12_simm16_218bea + gfx12_simm16_39b593 + gfx12_simm16_3d2a4f + gfx12_simm16_730a13 + gfx12_simm16_7ed651 + gfx12_simm16_81e671 + gfx12_simm16_c98889 + gfx12_simm16_cc1716 + gfx12_simm16_ee8b30 + gfx12_soffset_8ec073 + gfx12_soffset_c5b88c + gfx12_soffset_ec005a + gfx12_src0_5727cf + gfx12_src0_5cae62 + gfx12_src0_6802ce + gfx12_src0_85aab6 + gfx12_src0_c4593f + gfx12_src0_e016a1 + gfx12_src0_fd235e + gfx12_src1_5727cf + gfx12_src1_5cae62 + gfx12_src1_6802ce + gfx12_src1_731030 + gfx12_src1_977794 + gfx12_src1_c4593f + gfx12_src1_e016a1 + gfx12_src1_fd235e + gfx12_src2_2797bc + gfx12_src2_5727cf + gfx12_src2_5cae62 + gfx12_src2_6802ce + gfx12_src2_7b936a + gfx12_src2_96fbd3 + gfx12_src2_c4593f + gfx12_src2_e016a1 + gfx12_srcx0 + gfx12_srcy0 + gfx12_ssrc0_007f9c + gfx12_ssrc0_1a9ca5 + gfx12_ssrc0_245536 + gfx12_ssrc0_2797bc + gfx12_ssrc0_bbb4c6 + gfx12_ssrc0_c4593f + gfx12_ssrc1_bbb4c6 + gfx12_ssrc1_c4593f + gfx12_tgt + gfx12_vaddr_a972b9 + gfx12_vaddr_c12f43 + gfx12_vaddr_c8b8d4 + gfx12_vaddr_d82160 + gfx12_vaddr_f2b449 + gfx12_vcc + gfx12_vdata_2eda77 + gfx12_vdata_48e42f + gfx12_vdata_69a144 + gfx12_vdata_89680f + gfx12_vdata_aac3e8 + gfx12_vdata_bdb32f + gfx12_vdst_006c40 + gfx12_vdst_227281 + gfx12_vdst_2eda77 + gfx12_vdst_47d3bc + 
gfx12_vdst_48e42f + gfx12_vdst_69a144 + gfx12_vdst_7de8e7 + gfx12_vdst_836716 + gfx12_vdst_89680f + gfx12_vdst_bdb32f + gfx12_vdstx + gfx12_vdsty + gfx12_vsrc0 + gfx12_vsrc1_6802ce + gfx12_vsrc1_fd235e + gfx12_vsrc2 + gfx12_vsrc3 + gfx12_vsrc_56f215 + gfx12_vsrc_6802ce + gfx12_vsrc_89fd7b + gfx12_vsrc_e016a1 + gfx12_vsrc_fd235e + gfx12_vsrcx1 + gfx12_vsrcy1 + gfx12_clause + gfx12_delay + gfx12_hwreg + gfx12_imm16 + gfx12_label + gfx12_sendmsg + gfx12_sendmsg_rtn + gfx12_version + gfx12_waitcnt diff --git a/llvm/docs/AMDGPU/gfx12_addr.rst b/llvm/docs/AMDGPU/gfx12_addr.rst new file mode 100644 index 0000000..d2fc0e0 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_addr.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_addr: + +addr +==== + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_attr.rst b/llvm/docs/AMDGPU/gfx12_attr.rst new file mode 100644 index 0000000..a6c5c27 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_attr.rst @@ -0,0 +1,28 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_attr: + +attr +==== + +Interpolation attribute and channel: + + ============== =================================== + Syntax Description + ============== =================================== + attr{0..32}.x Attribute 0..32 with *x* channel. + attr{0..32}.y Attribute 0..32 with *y* channel. + attr{0..32}.z Attribute 0..32 with *z* channel. + attr{0..32}.w Attribute 0..32 with *w* channel. + ============== =================================== + +Examples: + +.. 
parsed-literal:: + + ds_param_load v1, attr0.x diff --git a/llvm/docs/AMDGPU/gfx12_clause.rst b/llvm/docs/AMDGPU/gfx12_clause.rst new file mode 100644 index 0000000..88feb3b --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_clause.rst @@ -0,0 +1,7 @@ +.. _amdgpu_synid_clause: + +clause +====== + +Description of a clause following this instruction. + diff --git a/llvm/docs/AMDGPU/gfx12_data0_56f215.rst b/llvm/docs/AMDGPU/gfx12_data0_56f215.rst new file mode 100644 index 0000000..d8dde00 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data0_56f215.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data0_56f215: + +data0 +===== + +Instruction input. + +*Size:* 3 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_data0_6802ce.rst b/llvm/docs/AMDGPU/gfx12_data0_6802ce.rst new file mode 100644 index 0000000..02fe36f --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data0_6802ce.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data0_6802ce: + +data0 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_data0_e016a1.rst b/llvm/docs/AMDGPU/gfx12_data0_e016a1.rst new file mode 100644 index 0000000..914715b --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data0_e016a1.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data0_e016a1: + +data0 +===== + +Instruction input. + +*Size:* 4 dwords. 
+ +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_data0_fd235e.rst b/llvm/docs/AMDGPU/gfx12_data0_fd235e.rst new file mode 100644 index 0000000..7617c61 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data0_fd235e.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data0_fd235e: + +data0 +===== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_data1_6802ce.rst b/llvm/docs/AMDGPU/gfx12_data1_6802ce.rst new file mode 100644 index 0000000..318db2d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data1_6802ce.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data1_6802ce: + +data1 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_data1_731030.rst b/llvm/docs/AMDGPU/gfx12_data1_731030.rst new file mode 100644 index 0000000..1a6eda6 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data1_731030.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data1_731030: + +data1 +===== + +Instruction input. + +*Size:* 8 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_data1_e016a1.rst b/llvm/docs/AMDGPU/gfx12_data1_e016a1.rst new file mode 100644 index 0000000..dee4148 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data1_e016a1.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! 
* + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data1_e016a1: + +data1 +===== + +Instruction input. + +*Size:* 4 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_data1_fd235e.rst b/llvm/docs/AMDGPU/gfx12_data1_fd235e.rst new file mode 100644 index 0000000..c8d4a88 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data1_fd235e.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data1_fd235e: + +data1 +===== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_delay.rst b/llvm/docs/AMDGPU/gfx12_delay.rst new file mode 100644 index 0000000..600ece7 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_delay.rst @@ -0,0 +1,74 @@ +.. _amdgpu_synid_delay: + +delay +===== + +A delay between dependent SALU/VALU instructions. +This operand may specify a delay for 2 instructions: +the one after the current *s_delay_alu* instruction +and for the second instruction indicated by *SKIP*. + +The bits of this operand have the following meaning: + + ===== ========================================================== ============ + Bits Description Value Range + ===== ========================================================== ============ + 3:0 ID0: indicates a delay for the first instruction. 0..11 + 6:4 SKIP: indicates the position of the second instruction. 0..5 + 10:7 ID1: indicates a delay for the second instruction. 0..11 + ===== ========================================================== ============ + +This operand may be specified as one of the following: + +* An :ref:`integer_number<amdgpu_synid_integer_number>` or an :ref:`absolute_expression<amdgpu_synid_absolute_expression>`. The value must be in the range 0..0xFFFF. 
+* A combination of *instid0*, *instskip*, *instid1* values described below. + + ======================== =========================== =============== + Syntax Description Default Value + ======================== =========================== =============== + instid0(<*ID name*>) A symbolic *ID0* value. instid0(NO_DEP) + instskip(<*SKIP name*>) A symbolic *SKIP* value. instskip(SAME) + instid1(<*ID name*>) A symbolic *ID1* value. instid1(NO_DEP) + ======================== =========================== =============== + +These values may be specified in any order. +When more than one value is specified, the values must be separated from each other by a '|'. + +Valid *ID names* are defined below. + + =================== =================================================================== + Name Description + =================== =================================================================== + NO_DEP No dependency on any prior instruction. This is the default value. + VALU_DEP_1 Dependency on a previous VALU instruction, 1 opcode back. + VALU_DEP_2 Dependency on a previous VALU instruction, 2 opcodes back. + VALU_DEP_3 Dependency on a previous VALU instruction, 3 opcodes back. + VALU_DEP_4 Dependency on a previous VALU instruction, 4 opcodes back. + TRANS32_DEP_1 Dependency on a previous TRANS32 instruction, 1 opcode back. + TRANS32_DEP_2 Dependency on a previous TRANS32 instruction, 2 opcodes back. + TRANS32_DEP_3 Dependency on a previous TRANS32 instruction, 3 opcodes back. + FMA_ACCUM_CYCLE_1 Single cycle penalty for FMA accumulation. + SALU_CYCLE_1 1 cycle penalty for a prior SALU instruction. + SALU_CYCLE_2 2 cycle penalty for a prior SALU instruction. + SALU_CYCLE_3 3 cycle penalty for a prior SALU instruction. + =================== =================================================================== + +Legal *SKIP names* are described in the following table. 
+ + ======== ============================================================================ + Name Description + ======== ============================================================================ + SAME Apply second dependency to the same instruction. This is the default value. + NEXT Apply second dependency to the next instruction. + SKIP_1 Skip 1 instruction then apply dependency. + SKIP_2 Skip 2 instructions then apply dependency. + SKIP_3 Skip 3 instructions then apply dependency. + SKIP_4 Skip 4 instructions then apply dependency. + ======== ============================================================================ + +Examples: + +.. parsed-literal:: + + s_delay_alu instid0(VALU_DEP_1) + s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) diff --git a/llvm/docs/AMDGPU/gfx12_hwreg.rst b/llvm/docs/AMDGPU/gfx12_hwreg.rst new file mode 100644 index 0000000..d99cb20 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_hwreg.rst @@ -0,0 +1,76 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_hwreg: + +hwreg +===== + +Bits of a hardware register being accessed. + +The bits of this operand have the following meaning: + + ======= ===================== ============ + Bits Description Value Range + ======= ===================== ============ + 5:0 Register *id*. 0..63 + 10:6 First bit *offset*. 0..31 + 15:11 *Size* in bits. 1..32 + ======= ===================== ============ + +This operand may be specified as one of the following: + +* An :ref:`integer_number<amdgpu_synid_integer_number>` or an :ref:`absolute_expression<amdgpu_synid_absolute_expression>`. The value must be in the range 0..0xFFFF. +* An *hwreg* value described below. 
+ + ==================================== ============================================================================ + Hwreg Value Syntax Description + ==================================== ============================================================================ + hwreg({0..63}) All bits of a register indicated by its *id*. + hwreg(<*name*>) All bits of a register indicated by its *name*. + hwreg({0..63}, {0..31}, {1..32}) Register bits indicated by register *id*, first bit *offset* and *size*. + hwreg(<*name*>, {0..31}, {1..32}) Register bits indicated by register *name*, first bit *offset* and *size*. + ==================================== ============================================================================ + +Numeric values may be specified as positive :ref:`integer numbers<amdgpu_synid_integer_number>` +or :ref:`absolute expressions<amdgpu_synid_absolute_expression>`. + +Defined register *names* include: + + =================== ========================================== + Name Description + =================== ========================================== + HW_REG_MODE Shader writeable mode bits. + HW_REG_STATUS Shader read-only status. + HW_REG_TRAPSTS Trap status. + HW_REG_HW_ID1 Id of wave, simd, compute unit, etc. + HW_REG_HW_ID2 Id of queue, pipeline, etc. + HW_REG_GPR_ALLOC Per-wave SGPR and VGPR allocation. + HW_REG_LDS_ALLOC Per-wave LDS allocation. + HW_REG_IB_STS Counters of outstanding instructions. + HW_REG_SH_MEM_BASES Memory aperture. + HW_REG_FLAT_SCR_LO flat_scratch_lo register. + HW_REG_FLAT_SCR_HI flat_scratch_hi register. + =================== ========================================== + +Examples: + +.. 
parsed-literal:: + + reg = 1 + offset = 2 + size = 4 + hwreg_enc = reg | (offset << 6) | ((size - 1) << 11) + + s_getreg_b32 s2, 0x1881 + s_getreg_b32 s2, hwreg_enc // the same as above + s_getreg_b32 s2, hwreg(1, 2, 4) // the same as above + s_getreg_b32 s2, hwreg(reg, offset, size) // the same as above + + s_getreg_b32 s2, hwreg(15) + s_getreg_b32 s2, hwreg(51, 1, 31) + s_getreg_b32 s2, hwreg(HW_REG_LDS_ALLOC, 0, 1) diff --git a/llvm/docs/AMDGPU/gfx12_imm16.rst b/llvm/docs/AMDGPU/gfx12_imm16.rst new file mode 100644 index 0000000..44e6d58 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_imm16.rst @@ -0,0 +1,7 @@ +.. _amdgpu_synid_imm16: + +imm16 +====== + +An :ref:`integer_number<amdgpu_synid_integer_number>` or an :ref:`absolute_expression<amdgpu_synid_absolute_expression>`. The value must be in the range -32768..65535. + diff --git a/llvm/docs/AMDGPU/gfx12_ioffset.rst b/llvm/docs/AMDGPU/gfx12_ioffset.rst new file mode 100644 index 0000000..0901b77 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ioffset.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ioffset: + +ioffset +======= + +*Size:* 1 dword. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_label.rst b/llvm/docs/AMDGPU/gfx12_label.rst new file mode 100644 index 0000000..bdd6e1c --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_label.rst @@ -0,0 +1,29 @@ +.. _amdgpu_synid_label: + +label +===== + +A branch target which is a 16-bit signed integer treated as a PC-relative dword offset. + +This operand may be specified as one of the following: + +* An :ref:`integer_number<amdgpu_synid_integer_number>` or an :ref:`absolute_expression<amdgpu_synid_absolute_expression>`. The value must be in the range -32768..65535. 
+* A :ref:`symbol<amdgpu_synid_symbol>` (for example, a label) representing a relocatable address in the same compilation unit where it is referred from. The value is handled as a 16-bit PC-relative dword offset to be resolved by a linker. + +Examples: + +.. parsed-literal:: + + offset = 30 + label_1: + label_2 = . + 4 + + s_branch 32 + s_branch offset + 2 + s_branch label_1 + s_branch label_2 + s_branch label_3 + s_branch label_4 + + label_3 = label_2 + 4 + label_4: diff --git a/llvm/docs/AMDGPU/gfx12_literal_1f74c7.rst b/llvm/docs/AMDGPU/gfx12_literal_1f74c7.rst new file mode 100644 index 0000000..7442c5d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_literal_1f74c7.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_literal_1f74c7: + +literal +======= + +*Size:* 2 dwords. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_literal_81e671.rst b/llvm/docs/AMDGPU/gfx12_literal_81e671.rst new file mode 100644 index 0000000..ab1b056 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_literal_81e671.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_literal_81e671: + +literal +======= + +*Size:* 1 dword. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_m.rst b/llvm/docs/AMDGPU/gfx12_m.rst new file mode 100644 index 0000000..7cfee90 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_m.rst @@ -0,0 +1,13 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_m: + +m += + +This operand may be used with floating point operand modifiers :ref:`abs<amdgpu_synid_abs>` and :ref:`neg<amdgpu_synid_neg>`. 
diff --git a/llvm/docs/AMDGPU/gfx12_rsrc_5fe6d8.rst b/llvm/docs/AMDGPU/gfx12_rsrc_5fe6d8.rst new file mode 100644 index 0000000..d1a475f --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_rsrc_5fe6d8.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_rsrc_5fe6d8: + +rsrc +==== + +Instruction input. + +*Size:* 4 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_rsrc_c9f929.rst b/llvm/docs/AMDGPU/gfx12_rsrc_c9f929.rst new file mode 100644 index 0000000..180ae06 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_rsrc_c9f929.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_rsrc_c9f929: + +rsrc +==== + +Instruction input. + +*Size:* 8 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_saddr_cdc95c.rst b/llvm/docs/AMDGPU/gfx12_saddr_cdc95c.rst new file mode 100644 index 0000000..4b3511f --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_saddr_cdc95c.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_saddr_cdc95c: + +saddr +===== + +*Size:* 2 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_saddr_d42b64.rst b/llvm/docs/AMDGPU/gfx12_saddr_d42b64.rst new file mode 100644 index 0000000..d3de11d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_saddr_d42b64.rst @@ -0,0 +1,15 @@ +.. 
+ ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_saddr_d42b64: + +saddr +===== + +*Size:* 1 dword. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_samp.rst b/llvm/docs/AMDGPU/gfx12_samp.rst new file mode 100644 index 0000000..2bb15e5 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_samp.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_samp: + +samp +==== + +*Size:* 4 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_sbase_453b95.rst b/llvm/docs/AMDGPU/gfx12_sbase_453b95.rst new file mode 100644 index 0000000..54c2dee --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sbase_453b95.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sbase_453b95: + +sbase +===== + +A 128-bit buffer resource constant for scalar memory operations which provides a base address, a size and a stride. + +*Size:* 4 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_sbase_47adb7.rst b/llvm/docs/AMDGPU/gfx12_sbase_47adb7.rst new file mode 100644 index 0000000..2308b3d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sbase_47adb7.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! 
* + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sbase_47adb7: + +sbase +===== + +A 64-bit base address for scalar memory operations. + +*Size:* 2 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_sdata_0974a4.rst b/llvm/docs/AMDGPU/gfx12_sdata_0974a4.rst new file mode 100644 index 0000000..d498f8c --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_0974a4.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_0974a4: + +sdata +===== + +Instruction output. + +*Size:* 8 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_sdata_354189.rst b/llvm/docs/AMDGPU/gfx12_sdata_354189.rst new file mode 100644 index 0000000..c506654 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_354189.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_354189: + +sdata +===== + +Instruction output. + +*Size:* 2 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_sdata_4585b8.rst b/llvm/docs/AMDGPU/gfx12_sdata_4585b8.rst new file mode 100644 index 0000000..42f66f3 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_4585b8.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_4585b8: + +sdata +===== + +Instruction output. + +*Size:* 4 dwords. 
+ +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_sdata_5c7b50.rst b/llvm/docs/AMDGPU/gfx12_sdata_5c7b50.rst new file mode 100644 index 0000000..028461a --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_5c7b50.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_5c7b50: + +sdata +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_sdata_6c003b.rst b/llvm/docs/AMDGPU/gfx12_sdata_6c003b.rst new file mode 100644 index 0000000..87e19a9 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_6c003b.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_6c003b: + +sdata +===== + +Instruction output. + +*Size:* 16 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_sdata_836716.rst b/llvm/docs/AMDGPU/gfx12_sdata_836716.rst new file mode 100644 index 0000000..be1bce9 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_836716.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_836716: + +sdata +===== + +Instruction output. + +*Size:* 1 dword. 
+ +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_sdata_d725ab.rst b/llvm/docs/AMDGPU/gfx12_sdata_d725ab.rst new file mode 100644 index 0000000..c882df8 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_d725ab.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_d725ab: + +sdata +===== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`simm8<amdgpu_synid_simm8>` diff --git a/llvm/docs/AMDGPU/gfx12_sdata_dd9dd8.rst b/llvm/docs/AMDGPU/gfx12_sdata_dd9dd8.rst new file mode 100644 index 0000000..6465889 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_dd9dd8.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_dd9dd8: + +sdata +===== + +Instruction output. + +*Size:* 3 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_sdst_006c40.rst b/llvm/docs/AMDGPU/gfx12_sdst_006c40.rst new file mode 100644 index 0000000..f269b05 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdst_006c40.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdst_006c40: + +sdst +==== + +Instruction output. + +*Size:* 2 dwords. 
+ +*Operands:* :ref:`vcc<amdgpu_synid_vcc>` diff --git a/llvm/docs/AMDGPU/gfx12_sdst_20064d.rst b/llvm/docs/AMDGPU/gfx12_sdst_20064d.rst new file mode 100644 index 0000000..83c11a2 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdst_20064d.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdst_20064d: + +sdst +==== + +*Size:* 1 dword. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_sdst_354189.rst b/llvm/docs/AMDGPU/gfx12_sdst_354189.rst new file mode 100644 index 0000000..8433406 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdst_354189.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdst_354189: + +sdst +==== + +Instruction output. + +*Size:* 2 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_sdst_836716.rst b/llvm/docs/AMDGPU/gfx12_sdst_836716.rst new file mode 100644 index 0000000..abce569 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdst_836716.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdst_836716: + +sdst +==== + +Instruction output. + +*Size:* 1 dword. 
+ +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_sdst_ced58d.rst b/llvm/docs/AMDGPU/gfx12_sdst_ced58d.rst new file mode 100644 index 0000000..e0072d9 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdst_ced58d.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdst_ced58d: + +sdst +==== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_sdst_e701cc.rst b/llvm/docs/AMDGPU/gfx12_sdst_e701cc.rst new file mode 100644 index 0000000..33e8c37 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdst_e701cc.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdst_e701cc: + +sdst +==== + +Instruction output. + +*Size:* 1 dword if wavefront size is 32, otherwise 2 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_sendmsg.rst b/llvm/docs/AMDGPU/gfx12_sendmsg.rst new file mode 100644 index 0000000..cb51be0 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sendmsg.rst @@ -0,0 +1,48 @@ +.. _amdgpu_synid_sendmsg: + +sendmsg +======= + +An 8-bit value in simm16[7:0] encodes the message type. 
+ +This operand may be specified as one of the following: + +* An :ref:`integer_number<amdgpu_synid_integer_number>` or an :ref:`absolute_expression<amdgpu_synid_absolute_expression>`. The value must be in the range 0..0xFFFF. +* A *sendmsg* value described below. + + + ==================================== ==================================================== + Sendmsg Value Syntax Description + ==================================== ==================================================== + sendmsg(<*type*>) A message identified by its *type*. + ==================================== ==================================================== + +*Type* may be specified using message *name* or message *id*. + +Numeric values may be specified as positive :ref:`integer numbers<amdgpu_synid_integer_number>` +or :ref:`absolute expressions<amdgpu_synid_absolute_expression>`. + + +Only the following message types are valid. + + ====================== =========== + Message type simm16[7:0] + ====================== =========== + Reserved 0 + MSG_INTERRUPT 1 + MSG_HS_TESSFACTOR 2 + MSG_DEALLOC_VGPRS 3 + MSG_GS_ALLOC_REQ 9 + ====================== =========== + +Examples: + +.. parsed-literal:: + + // numeric message code + msg = 0x1 + s_sendmsg 0x3 + s_sendmsg msg + 2 + + // sendmsg with strict arguments validation + s_sendmsg sendmsg(MSG_INTERRUPT) diff --git a/llvm/docs/AMDGPU/gfx12_sendmsg_rtn.rst b/llvm/docs/AMDGPU/gfx12_sendmsg_rtn.rst new file mode 100644 index 0000000..ebb591d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sendmsg_rtn.rst @@ -0,0 +1,30 @@ +.. _amdgpu_synid_sendmsg_rtn: + +sendmsg_rtn +=========== + +An 8-bit value in the instruction to encode the message type. + +This operand may be specified as one of the following: + + * An :ref:`integer_number<amdgpu_synid_integer_number>` or an :ref:`absolute_expression<amdgpu_synid_absolute_expression>`. The value must be in the range 0..0xFFFF. + * A *sendmsg* value described below. 
+ + ==================================== ==================================================== + Sendmsg Value Syntax Description + ==================================== ==================================================== + sendmsg(MSG_RTN_GET_DOORBELL) Get doorbell ID. + sendmsg(MSG_RTN_GET_DDID) Get Draw/Dispatch ID. + sendmsg(MSG_RTN_GET_TMA) Get TMA value. + sendmsg(MSG_RTN_GET_TBA) Get TBA value. + sendmsg(MSG_RTN_GET_REALTIME) Get REALTIME value. + sendmsg(MSG_RTN_SAVE_WAVE) Report that this wave is ready to be context-saved. + ==================================== ==================================================== + +Examples: + +.. parsed-literal:: + + s_sendmsg_rtn_b32 s0, 132 + s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_REALTIME) + diff --git a/llvm/docs/AMDGPU/gfx12_simm16_15ccdd.rst b/llvm/docs/AMDGPU/gfx12_simm16_15ccdd.rst new file mode 100644 index 0000000..0cb1233 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_15ccdd.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_15ccdd: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* :ref:`version<amdgpu_synid_version>` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_218bea.rst b/llvm/docs/AMDGPU/gfx12_simm16_218bea.rst new file mode 100644 index 0000000..e08605e --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_218bea.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_218bea: + +simm16 +====== + +*Size:* 1 dword. 
+ +*Operands:* :ref:`waitcnt<amdgpu_synid_waitcnt>` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_39b593.rst b/llvm/docs/AMDGPU/gfx12_simm16_39b593.rst new file mode 100644 index 0000000..babb4b6 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_39b593.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_39b593: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* :ref:`imm16<amdgpu_synid_imm16>` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_3d2a4f.rst b/llvm/docs/AMDGPU/gfx12_simm16_3d2a4f.rst new file mode 100644 index 0000000..cc8dbc6 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_3d2a4f.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_3d2a4f: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* :ref:`label<amdgpu_synid_label>` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_730a13.rst b/llvm/docs/AMDGPU/gfx12_simm16_730a13.rst new file mode 100644 index 0000000..93596db --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_730a13.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_730a13: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* :ref:`clause<amdgpu_synid_clause>` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_7ed651.rst b/llvm/docs/AMDGPU/gfx12_simm16_7ed651.rst new file mode 100644 index 0000000..fc63930 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_7ed651.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! 
* + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_7ed651: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* :ref:`hwreg<amdgpu_synid_hwreg>` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_81e671.rst b/llvm/docs/AMDGPU/gfx12_simm16_81e671.rst new file mode 100644 index 0000000..16dcf39 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_81e671.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_81e671: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_simm16_c98889.rst b/llvm/docs/AMDGPU/gfx12_simm16_c98889.rst new file mode 100644 index 0000000..03e007af --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_c98889.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_c98889: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* :ref:`delay<amdgpu_synid_delay>` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_cc1716.rst b/llvm/docs/AMDGPU/gfx12_simm16_cc1716.rst new file mode 100644 index 0000000..e53f812 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_cc1716.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_cc1716: + +simm16 +====== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`hwreg<amdgpu_synid_hwreg>` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_ee8b30.rst b/llvm/docs/AMDGPU/gfx12_simm16_ee8b30.rst new file mode 100644 index 0000000..9bdac9b --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_ee8b30.rst @@ -0,0 +1,15 @@ +.. 
+ ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_ee8b30: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* :ref:`sendmsg<amdgpu_synid_sendmsg>` diff --git a/llvm/docs/AMDGPU/gfx12_soffset_8ec073.rst b/llvm/docs/AMDGPU/gfx12_soffset_8ec073.rst new file mode 100644 index 0000000..44de030 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_soffset_8ec073.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_soffset_8ec073: + +soffset +======= + +An unsigned 20-bit offset added to the base address to get memory address. + +*Size:* 1 dword. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_soffset_c5b88c.rst b/llvm/docs/AMDGPU/gfx12_soffset_c5b88c.rst new file mode 100644 index 0000000..d115150 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_soffset_c5b88c.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_soffset_c5b88c: + +soffset +======= + +*Size:* 1 dword. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_soffset_ec005a.rst b/llvm/docs/AMDGPU/gfx12_soffset_ec005a.rst new file mode 100644 index 0000000..bd571b6 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_soffset_ec005a.rst @@ -0,0 +1,20 @@ +.. 
+ ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_soffset_ec005a: + +soffset +======= + +An offset added to the base address to get memory address. + +* If offset is specified as a register, it supplies an unsigned byte offset. +* If offset is specified as a 21-bit immediate, it supplies a signed byte offset. + +*Size:* 1 dword. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_src0_5727cf.rst b/llvm/docs/AMDGPU/gfx12_src0_5727cf.rst new file mode 100644 index 0000000..15fde5c --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src0_5727cf.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src0_5727cf: + +src0 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_src0_5cae62.rst b/llvm/docs/AMDGPU/gfx12_src0_5cae62.rst new file mode 100644 index 0000000..fa02f046 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src0_5cae62.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. 
_amdgpu_synid_gfx12_src0_5cae62: + +src0 +==== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>` diff --git a/llvm/docs/AMDGPU/gfx12_src0_6802ce.rst b/llvm/docs/AMDGPU/gfx12_src0_6802ce.rst new file mode 100644 index 0000000..e17a719 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src0_6802ce.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src0_6802ce: + +src0 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_src0_85aab6.rst b/llvm/docs/AMDGPU/gfx12_src0_85aab6.rst new file mode 100644 index 0000000..effa6f6 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src0_85aab6.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src0_85aab6: + +src0 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_src0_c4593f.rst b/llvm/docs/AMDGPU/gfx12_src0_c4593f.rst new file mode 100644 index 0000000..bbe6191 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src0_c4593f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src0_c4593f: + +src0 +==== + +Instruction input. 
+ +*Size:* 1 dword. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_src0_e016a1.rst b/llvm/docs/AMDGPU/gfx12_src0_e016a1.rst new file mode 100644 index 0000000..c2d23d7 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src0_e016a1.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src0_e016a1: + +src0 +==== + +Instruction input. + +*Size:* 4 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_src0_fd235e.rst b/llvm/docs/AMDGPU/gfx12_src0_fd235e.rst new file mode 100644 index 0000000..dc048af --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src0_fd235e.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src0_fd235e: + +src0 +==== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_src1_5727cf.rst b/llvm/docs/AMDGPU/gfx12_src1_5727cf.rst new file mode 100644 index 0000000..d1d0837 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_5727cf.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_5727cf: + +src1 +==== + +Instruction input. + +*Size:* 1 dword. 
+ +*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_src1_5cae62.rst b/llvm/docs/AMDGPU/gfx12_src1_5cae62.rst new file mode 100644 index 0000000..3ad591c --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_5cae62.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_5cae62: + +src1 +==== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>` diff --git a/llvm/docs/AMDGPU/gfx12_src1_6802ce.rst b/llvm/docs/AMDGPU/gfx12_src1_6802ce.rst new file mode 100644 index 0000000..84ff631 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_6802ce.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_6802ce: + +src1 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_src1_731030.rst b/llvm/docs/AMDGPU/gfx12_src1_731030.rst new file mode 100644 index 0000000..8c67699 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_731030.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! 
* + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_731030: + +src1 +==== + +Instruction input. + +*Size:* 8 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_src1_977794.rst b/llvm/docs/AMDGPU/gfx12_src1_977794.rst new file mode 100644 index 0000000..7651340 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_977794.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_977794: + +src1 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_src1_c4593f.rst b/llvm/docs/AMDGPU/gfx12_src1_c4593f.rst new file mode 100644 index 0000000..aba4da8 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_c4593f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_c4593f: + +src1 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_src1_e016a1.rst b/llvm/docs/AMDGPU/gfx12_src1_e016a1.rst new file mode 100644 index 0000000..4385853 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_e016a1.rst @@ -0,0 +1,17 @@ +.. 
+ ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_e016a1: + +src1 +==== + +Instruction input. + +*Size:* 4 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_src1_fd235e.rst b/llvm/docs/AMDGPU/gfx12_src1_fd235e.rst new file mode 100644 index 0000000..5863e93 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_fd235e.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_fd235e: + +src1 +==== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_src2_2797bc.rst b/llvm/docs/AMDGPU/gfx12_src2_2797bc.rst new file mode 100644 index 0000000..b393e2a --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_2797bc.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_2797bc: + +src2 +==== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_src2_5727cf.rst b/llvm/docs/AMDGPU/gfx12_src2_5727cf.rst new file mode 100644 index 0000000..9ffaa079 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_5727cf.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_5727cf: + +src2 +==== + +Instruction input. + +*Size:* 1 dword. 
+ +*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_src2_5cae62.rst b/llvm/docs/AMDGPU/gfx12_src2_5cae62.rst new file mode 100644 index 0000000..46d65cb --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_5cae62.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_5cae62: + +src2 +==== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>` diff --git a/llvm/docs/AMDGPU/gfx12_src2_6802ce.rst b/llvm/docs/AMDGPU/gfx12_src2_6802ce.rst new file mode 100644 index 0000000..0ad2ede --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_6802ce.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_6802ce: + +src2 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_src2_7b936a.rst b/llvm/docs/AMDGPU/gfx12_src2_7b936a.rst new file mode 100644 index 0000000..9f1ea3c --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_7b936a.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! 
* + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_7b936a: + +src2 +==== + +Instruction input. + +*Size:* 4 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`fconst<amdgpu_synid_fconst>` diff --git a/llvm/docs/AMDGPU/gfx12_src2_96fbd3.rst b/llvm/docs/AMDGPU/gfx12_src2_96fbd3.rst new file mode 100644 index 0000000..884d089 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_96fbd3.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_96fbd3: + +src2 +==== + +Instruction input. + +*Size:* 8 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`fconst<amdgpu_synid_fconst>` diff --git a/llvm/docs/AMDGPU/gfx12_src2_c4593f.rst b/llvm/docs/AMDGPU/gfx12_src2_c4593f.rst new file mode 100644 index 0000000..849230b --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_c4593f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_c4593f: + +src2 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_src2_e016a1.rst b/llvm/docs/AMDGPU/gfx12_src2_e016a1.rst new file mode 100644 index 0000000..266c4ea --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_e016a1.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! 
* + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_e016a1: + +src2 +==== + +Instruction input. + +*Size:* 4 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_srcx0.rst b/llvm/docs/AMDGPU/gfx12_srcx0.rst new file mode 100644 index 0000000..57b05a1 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_srcx0.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_srcx0: + +srcx0 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_srcy0.rst b/llvm/docs/AMDGPU/gfx12_srcy0.rst new file mode 100644 index 0000000..350b742 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_srcy0.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_srcy0: + +srcy0 +===== + +Instruction input. + +*Size:* 1 dword. 
+ +*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_007f9c.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_007f9c.rst new file mode 100644 index 0000000..c3f33e4f --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc0_007f9c.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc0_007f9c: + +ssrc0 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_1a9ca5.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_1a9ca5.rst new file mode 100644 index 0000000..5aa3f2d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc0_1a9ca5.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc0_1a9ca5: + +ssrc0 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`m0<amdgpu_synid_m0>` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_245536.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_245536.rst new file mode 100644 index 0000000..36925da --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc0_245536.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. 
_amdgpu_synid_gfx12_ssrc0_245536: + +ssrc0 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`sendmsg_rtn<amdgpu_synid_sendmsg_rtn>` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_2797bc.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_2797bc.rst new file mode 100644 index 0000000..4eae705 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc0_2797bc.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc0_2797bc: + +ssrc0 +===== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_bbb4c6.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_bbb4c6.rst new file mode 100644 index 0000000..a29f83d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc0_bbb4c6.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc0_bbb4c6: + +ssrc0 +===== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_c4593f.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_c4593f.rst new file mode 100644 index 0000000..33ca4d6 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc0_c4593f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc0_c4593f: + +ssrc0 +===== + +Instruction input. + +*Size:* 1 dword. 
+ +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc1_bbb4c6.rst b/llvm/docs/AMDGPU/gfx12_ssrc1_bbb4c6.rst new file mode 100644 index 0000000..1f3ea34 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc1_bbb4c6.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc1_bbb4c6: + +ssrc1 +===== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc1_c4593f.rst b/llvm/docs/AMDGPU/gfx12_ssrc1_c4593f.rst new file mode 100644 index 0000000..f81d0f2 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc1_c4593f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc1_c4593f: + +ssrc1 +===== + +Instruction input. + +*Size:* 1 dword. 
+ +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_tgt.rst b/llvm/docs/AMDGPU/gfx12_tgt.rst new file mode 100644 index 0000000..83a25aa --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_tgt.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_tgt: + +tgt +=== + +Instruction output. + +*Size:* 4 dwords. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_a972b9.rst b/llvm/docs/AMDGPU/gfx12_vaddr_a972b9.rst new file mode 100644 index 0000000..223b50d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vaddr_a972b9.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vaddr_a972b9: + +vaddr +===== + +*Size:* 11 dwords. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_c12f43.rst b/llvm/docs/AMDGPU/gfx12_vaddr_c12f43.rst new file mode 100644 index 0000000..5a93efe --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vaddr_c12f43.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vaddr_c12f43: + +vaddr +===== + +*Size:* 12 dwords. 
+ +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_c8b8d4.rst b/llvm/docs/AMDGPU/gfx12_vaddr_c8b8d4.rst new file mode 100644 index 0000000..1998e1d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vaddr_c8b8d4.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vaddr_c8b8d4: + +vaddr +===== + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_d82160.rst b/llvm/docs/AMDGPU/gfx12_vaddr_d82160.rst new file mode 100644 index 0000000..92d09a2 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vaddr_d82160.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vaddr_d82160: + +vaddr +===== + +*Size:* 4 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_f2b449.rst b/llvm/docs/AMDGPU/gfx12_vaddr_f2b449.rst new file mode 100644 index 0000000..10d7e0a --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vaddr_f2b449.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vaddr_f2b449: + +vaddr +===== + +*Size:* 2 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vcc.rst b/llvm/docs/AMDGPU/gfx12_vcc.rst new file mode 100644 index 0000000..e8509ff --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vcc.rst @@ -0,0 +1,16 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vcc: + +vcc +=== + +Vector condition code. 
This operand depends on wavefront size: + +* Should be :ref:`vcc_lo<amdgpu_synid_vcc_lo>` if wavefront size is 32. +* Should be :ref:`vcc<amdgpu_synid_vcc>` if wavefront size is 64. diff --git a/llvm/docs/AMDGPU/gfx12_vdata_2eda77.rst b/llvm/docs/AMDGPU/gfx12_vdata_2eda77.rst new file mode 100644 index 0000000..839ec86 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdata_2eda77.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdata_2eda77: + +vdata +===== + +Instruction output. + +*Size:* 32 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vdata_48e42f.rst b/llvm/docs/AMDGPU/gfx12_vdata_48e42f.rst new file mode 100644 index 0000000..d2ab49a --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdata_48e42f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdata_48e42f: + +vdata +===== + +Instruction output. + +*Size:* 3 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vdata_69a144.rst b/llvm/docs/AMDGPU/gfx12_vdata_69a144.rst new file mode 100644 index 0000000..22ac087 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdata_69a144.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdata_69a144: + +vdata +===== + +Instruction output. + +*Size:* 4 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vdata_89680f.rst b/llvm/docs/AMDGPU/gfx12_vdata_89680f.rst new file mode 100644 index 0000000..5f4f478 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdata_89680f.rst @@ -0,0 +1,17 @@ +.. 
+ ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdata_89680f: + +vdata +===== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vdata_aac3e8.rst b/llvm/docs/AMDGPU/gfx12_vdata_aac3e8.rst new file mode 100644 index 0000000..2e285ef --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdata_aac3e8.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdata_aac3e8: + +vdata +===== + +Instruction output. + +*Size:* 10 dwords. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_vdata_bdb32f.rst b/llvm/docs/AMDGPU/gfx12_vdata_bdb32f.rst new file mode 100644 index 0000000..109c767 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdata_bdb32f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdata_bdb32f: + +vdata +===== + +Instruction output. + +*Size:* 2 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_006c40.rst b/llvm/docs/AMDGPU/gfx12_vdst_006c40.rst new file mode 100644 index 0000000..dc3ac95 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_006c40.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_006c40: + +vdst +==== + +Instruction output. + +*Size:* 2 dwords. 
+ +*Operands:* :ref:`vcc<amdgpu_synid_vcc>` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_227281.rst b/llvm/docs/AMDGPU/gfx12_vdst_227281.rst new file mode 100644 index 0000000..13fd951 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_227281.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_227281: + +vdst +==== + +Instruction output. + +*Size:* 4 dwords if wavefront size is 64, otherwise 8 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_2eda77.rst b/llvm/docs/AMDGPU/gfx12_vdst_2eda77.rst new file mode 100644 index 0000000..9372e48 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_2eda77.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_2eda77: + +vdst +==== + +Instruction output. + +*Size:* 32 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_47d3bc.rst b/llvm/docs/AMDGPU/gfx12_vdst_47d3bc.rst new file mode 100644 index 0000000..056fe3f --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_47d3bc.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_47d3bc: + +vdst +==== + +Instruction output. + +*Size:* 8 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_48e42f.rst b/llvm/docs/AMDGPU/gfx12_vdst_48e42f.rst new file mode 100644 index 0000000..84ab35b --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_48e42f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! 
* + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_48e42f: + +vdst +==== + +Instruction output. + +*Size:* 3 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_69a144.rst b/llvm/docs/AMDGPU/gfx12_vdst_69a144.rst new file mode 100644 index 0000000..70873ff --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_69a144.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_69a144: + +vdst +==== + +Instruction output. + +*Size:* 4 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_7de8e7.rst b/llvm/docs/AMDGPU/gfx12_vdst_7de8e7.rst new file mode 100644 index 0000000..7248ea9 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_7de8e7.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_7de8e7: + +vdst +==== + +Instruction output. + +*Size:* 2 dwords. + +*Operands:* :ref:`exec<amdgpu_synid_exec>` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_836716.rst b/llvm/docs/AMDGPU/gfx12_vdst_836716.rst new file mode 100644 index 0000000..1cd43ee9 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_836716.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_836716: + +vdst +==== + +Instruction output. + +*Size:* 1 dword. 
+ +*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_89680f.rst b/llvm/docs/AMDGPU/gfx12_vdst_89680f.rst new file mode 100644 index 0000000..b4f055c --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_89680f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_89680f: + +vdst +==== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_bdb32f.rst b/llvm/docs/AMDGPU/gfx12_vdst_bdb32f.rst new file mode 100644 index 0000000..e2a4a47 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_bdb32f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_bdb32f: + +vdst +==== + +Instruction output. + +*Size:* 2 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vdstx.rst b/llvm/docs/AMDGPU/gfx12_vdstx.rst new file mode 100644 index 0000000..4b95d4d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdstx.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdstx: + +vdstx +===== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vdsty.rst b/llvm/docs/AMDGPU/gfx12_vdsty.rst new file mode 100644 index 0000000..cf0b464 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdsty.rst @@ -0,0 +1,17 @@ +.. 
+ ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdsty: + +vdsty +===== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_version.rst b/llvm/docs/AMDGPU/gfx12_version.rst new file mode 100644 index 0000000..4e490ca --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_version.rst @@ -0,0 +1,7 @@ +.. _amdgpu_synid_version: + +version +======= + +Microcode version header. + diff --git a/llvm/docs/AMDGPU/gfx12_vsrc0.rst b/llvm/docs/AMDGPU/gfx12_vsrc0.rst new file mode 100644 index 0000000..fb38169 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc0.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc0: + +vsrc0 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc1_6802ce.rst b/llvm/docs/AMDGPU/gfx12_vsrc1_6802ce.rst new file mode 100644 index 0000000..4490545 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc1_6802ce.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc1_6802ce: + +vsrc1 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc1_fd235e.rst b/llvm/docs/AMDGPU/gfx12_vsrc1_fd235e.rst new file mode 100644 index 0000000..d6567c2 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc1_fd235e.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. 
_amdgpu_synid_gfx12_vsrc1_fd235e: + +vsrc1 +===== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc2.rst b/llvm/docs/AMDGPU/gfx12_vsrc2.rst new file mode 100644 index 0000000..fe20832 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc2.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc2: + +vsrc2 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc3.rst b/llvm/docs/AMDGPU/gfx12_vsrc3.rst new file mode 100644 index 0000000..18df9e4 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc3.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc3: + +vsrc3 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_56f215.rst b/llvm/docs/AMDGPU/gfx12_vsrc_56f215.rst new file mode 100644 index 0000000..166da38 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc_56f215.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc_56f215: + +vsrc +==== + +Instruction input. + +*Size:* 3 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_6802ce.rst b/llvm/docs/AMDGPU/gfx12_vsrc_6802ce.rst new file mode 100644 index 0000000..e879c2b --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc_6802ce.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! 
* + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc_6802ce: + +vsrc +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_89fd7b.rst b/llvm/docs/AMDGPU/gfx12_vsrc_89fd7b.rst new file mode 100644 index 0000000..c521e72 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc_89fd7b.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc_89fd7b: + +vsrc +==== + +Instruction input. + +*Size:* 32 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_e016a1.rst b/llvm/docs/AMDGPU/gfx12_vsrc_e016a1.rst new file mode 100644 index 0000000..84eb2ed --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc_e016a1.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc_e016a1: + +vsrc +==== + +Instruction input. + +*Size:* 4 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_fd235e.rst b/llvm/docs/AMDGPU/gfx12_vsrc_fd235e.rst new file mode 100644 index 0000000..640a235 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc_fd235e.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc_fd235e: + +vsrc +==== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vsrcx1.rst b/llvm/docs/AMDGPU/gfx12_vsrcx1.rst new file mode 100644 index 0000000..9dab58c --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrcx1.rst @@ -0,0 +1,17 @@ +.. 
+ ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrcx1: + +vsrcx1 +====== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_vsrcy1.rst b/llvm/docs/AMDGPU/gfx12_vsrcy1.rst new file mode 100644 index 0000000..496b2d6 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrcy1.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrcy1: + +vsrcy1 +====== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v<amdgpu_synid_v>` diff --git a/llvm/docs/AMDGPU/gfx12_waitcnt.rst b/llvm/docs/AMDGPU/gfx12_waitcnt.rst new file mode 100644 index 0000000..4541222 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_waitcnt.rst @@ -0,0 +1,55 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_waitcnt: + +waitcnt +======= + +Counts of outstanding instructions to wait for. + +The bits of this operand have the following meaning: + + ===== ================================================ ============ + Bits Description Value Range + ===== ================================================ ============ + 2:0 EXP_CNT: export and LDSDIR count. 0..7 + 3:3 Unused \- + 9:4 LGKM_CNT: LDS, GDS, Constant and Message count. 0..63 + 15:10 VM_CNT: vector memory operations count. 0..63 + ===== ================================================ ============ + +This operand may be specified as one of the following: + +* An :ref:`integer_number<amdgpu_synid_integer_number>` or an :ref:`absolute_expression<amdgpu_synid_absolute_expression>`. The value must be in the range 0..0xFFFF. 
+* A combination of *vmcnt*, *expcnt*, *lgkmcnt* and other values described below. + + ====================== ====================================================================== + Syntax Description + ====================== ====================================================================== + vmcnt(<*N*>) A VM_CNT value. *N* must not exceed the largest VM_CNT value. + expcnt(<*N*>) An EXP_CNT value. *N* must not exceed the largest EXP_CNT value. + lgkmcnt(<*N*>) An LGKM_CNT value. *N* must not exceed the largest LGKM_CNT value. + vmcnt_sat(<*N*>) A VM_CNT value computed as min(*N*, the largest VM_CNT value). + expcnt_sat(<*N*>) An EXP_CNT value computed as min(*N*, the largest EXP_CNT value). + lgkmcnt_sat(<*N*>) An LGKM_CNT value computed as min(*N*, the largest LGKM_CNT value). + ====================== ====================================================================== + +These values may be specified in any order. Spaces, ampersands and commas may be used as optional separators. + +*N* is either an +:ref:`integer number<amdgpu_synid_integer_number>` or an +:ref:`absolute expression<amdgpu_synid_absolute_expression>`. + +Examples: + +.. parsed-literal:: + + s_waitcnt vmcnt(1) + s_waitcnt expcnt(2) lgkmcnt(3) + s_waitcnt vmcnt(1), expcnt(2), lgkmcnt(3) + s_waitcnt vmcnt(1) & lgkmcnt_sat(100) & expcnt(2) diff --git a/llvm/docs/AMDGPUModifierSyntax.rst b/llvm/docs/AMDGPUModifierSyntax.rst index 334bdaf..8a60663 100644 --- a/llvm/docs/AMDGPUModifierSyntax.rst +++ b/llvm/docs/AMDGPUModifierSyntax.rst @@ -1078,6 +1078,73 @@ Examples: offset:0xfffff offset:-x +.. _amdgpu_synid_smem_offset24s: + +offset24s +~~~~~~~~~ + +Specifies a signed 24-bit offset, in bytes. The default value is 0. 
+ + ============================= ==================================================================== + Syntax Description + ============================= ==================================================================== + offset:{-0x1000000..0xFFFFFF} Specifies an offset as an + :ref:`integer number <amdgpu_synid_integer_number>` + or an :ref:`absolute expression<amdgpu_synid_absolute_expression>`. + ============================= ==================================================================== + +Examples: + +.. parsed-literal:: + + offset:-1 + offset:0xfffff + offset:-x + +.. _amdgpu_synid_th: + +th +~~ + +Specifies temporal hint of memory operation. + + =============================== ========================================================= + Syntax Description + =============================== ========================================================= + TH_{LOAD|STORE}_RT Regular + TH_{LOAD|STORE}_NT Non-temporal + TH_{LOAD|STORE}_HT High-temporal + TH_{LOAD|STORE}_LU Last use. Not available in SYS scope. + TH_{LOAD|STORE}_WB Regular (CU, SE); High-temporal with write-back (MALL) + TH_{LOAD|STORE}_NT_RT Non-temporal (CU, SE); Regular (MALL) + TH_{LOAD|STORE}_RT_NT Regular (CU, SE); Non-temporal (MALL) + TH_{LOAD|STORE}_NT_HT Non-temporal (CU, SE); High-temporal (MALL) + TH_{LOAD|STORE}_NT_WB Non-temporal (CU, SE); High-temporal with write-back (MALL) + TH_{LOAD|STORE}_BYPASS Available for SYS scope only. + TH_ATOMIC_RT Regular + TH_ATOMIC_RT_RETURN Regular. For atomic instructions that return values. + TH_ATOMIC_NT Non-temporal + TH_ATOMIC_NT_RETURN Non-temporal. For atomic instructions that return values. + TH_ATOMIC_CASCADE_RT Cascading atomic; Regular. + TH_ATOMIC_CASCADE_NT Cascading atomic; Non-temporal. + =============================== ========================================================= + +.. _amdgpu_synid_scope: + +scope +~~~~~ + +Specifies scope of memory operation. 
+ + =============================== ========================================================= + Syntax Description + =============================== ========================================================= + SCOPE_CU Coherency within a Compute Unit. + SCOPE_SE Coherency within a Shader Engine. + SCOPE_DEV Coherency within a single device. + SCOPE_SYS Coherency across the full system. + =============================== ========================================================= + VINTRP/VINTERP/LDSDIR Modifiers ------------------------------- @@ -1117,6 +1184,27 @@ The default value is zero. This is a safe value, but it may be suboptimal. issuing this instruction. ================ ====================================================== +.. _amdgpu_synid_wait_va_vdst: + +wait_va_vdst +~~~~~~~~~~~~ + +Manually specify a wait on the VA_VDST counter before issuing this instruction. VA_VDST must be less +than or equal to this value before the instruction is issued. If set to 15, no wait is performed. + +If unspecified the current default is zero. This is a safe value but may have poor performance characteristics. + +This modifier is a shorthand for the WAR hazard where VALU reads a VGPR that is written by a parameter +load. Since there is no VA_VSRC counter we must use VA_VDST as a proxy to detect when the +VALU instruction has completed: + +Examples: + +.. parsed-literal:: + + v_mov_b32 v1, v0 + ds_param_load v0, . . . wait_va_vdst:0 + .. _amdgpu_synid_wait_vdst: wait_vdst @@ -1135,6 +1223,27 @@ The default value is zero. This is a safe value, but it may be suboptimal. issuing this instruction. ================== ====================================================== +.. _amdgpu_synid_wait_vm_vsrc: + +wait_vm_vsrc +~~~~~~~~~~~~ + +Manually specify a wait on the VM_VSRC counter before issuing this instruction. VM_VSRC must be less +than or equal to this value before the instruction is issued. If set to 1, no wait is performed. + +If unspecified the current default is zero. 
This is a safe value but may have poor performance characteristics. + +This modifier is a shorthand for the WAR hazard where VMEM reads a VGPR that is written by a parameter +load. + +Examples: + +.. parsed-literal:: + + buffer_load_b32 v1, v0, s0, 0 + ds_param_load v0, . . . wait_vm_vsrc:0 + + DPP8 Modifiers -------------- diff --git a/llvm/docs/AMDGPUOperandSyntax.rst b/llvm/docs/AMDGPUOperandSyntax.rst index e8a7632..722290f 100644 --- a/llvm/docs/AMDGPUOperandSyntax.rst +++ b/llvm/docs/AMDGPUOperandSyntax.rst @@ -479,6 +479,7 @@ High and low 32 bits of *xnack mask* may be accessed as separate registers: .. _amdgpu_synid_vcc: .. _amdgpu_synid_vcc_lo: +.. _amdgpu_synid_vcc_hi: vcc --- @@ -523,6 +524,8 @@ including register indexing and bounds checking. =========== =================================================== .. _amdgpu_synid_exec: +.. _amdgpu_synid_exec_lo: +.. _amdgpu_synid_exec_hi: exec ---- @@ -752,6 +755,14 @@ or an :ref:`absolute expression<amdgpu_synid_absolute_expression>`. The value must be in the range -0x100000..0x0FFFFF. +.. _amdgpu_synid_simm8: + +simm8 +----- + +An 8-bit :ref:`integer number<amdgpu_synid_integer_number>` +or an :ref:`absolute expression<amdgpu_synid_absolute_expression>`. + .. _amdgpu_synid_off: off diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst index 74b7604..a4d110f 100644 --- a/llvm/docs/AMDGPUUsage.rst +++ b/llvm/docs/AMDGPUUsage.rst @@ -22,6 +22,7 @@ User Guide for AMDGPU Backend AMDGPU/AMDGPUAsmGFX1013 AMDGPU/AMDGPUAsmGFX1030 AMDGPU/AMDGPUAsmGFX11 + AMDGPU/AMDGPUAsmGFX12 AMDGPUModifierSyntax AMDGPUOperandSyntax AMDGPUInstructionSyntax @@ -19908,6 +19909,7 @@ in this description. 
:doc:`gfx1102<AMDGPU/AMDGPUAsmGFX11>` :doc:`gfx1103<AMDGPU/AMDGPUAsmGFX11>` + RDNA 4 :doc:`GFX12<AMDGPU/AMDGPUAsmGFX12>` :doc:`gfx1200<AMDGPU/AMDGPUAsmGFX12>` ============= ============================================= ======================================= For more information about instructions, their semantics and supported diff --git a/llvm/docs/DirectXUsage.rst b/llvm/docs/DirectXUsage.rst index 1d964e6..78f27d8 100644 --- a/llvm/docs/DirectXUsage.rst +++ b/llvm/docs/DirectXUsage.rst @@ -29,7 +29,7 @@ Initially the backend is aimed at supporting DirectX 12, and support for DirectX 11 is planned at a later date. The DirectX backend is currently experimental and is not shipped with any -release builds of LLVM tools. To enable building the DirectX backend locally add +release builds of LLVM tools. To build the DirectX backend locally, add ``DirectX`` to the ``LLVM_EXPERIMENTAL_TARGETS_TO_BUILD`` CMake option. For more information on building LLVM see the :doc:`CMake` documentation. @@ -38,7 +38,7 @@ information on building LLVM see the :doc:`CMake` documentation. Target Triples ============== -At present the DirectX target only supports the ``dxil`` architecture, which +At present, the DirectX target only supports the ``dxil`` architecture, which generates code for the `DirectX Intermediate Language. <https://github.com/microsoft/DirectXShaderCompiler/blob/main/docs/DXIL.rst>`_ @@ -46,8 +46,8 @@ In addition to target architecture, the DirectX backend also needs to know the target runtime version and pipeline stage. These are expressed using the OS and Environment triple component. -Presently the DirectX backend requires targeting the ``shadermodel`` OS, and -supports versions 6.0+ (at time of writing the latest announced version is 6.7). +Presently, the DirectX backend requires targeting the ``shadermodel`` OS, and +supports versions 6.0+ (as of writing, the latest announced version is 6.7). .. 
table:: DirectX Environments diff --git a/llvm/docs/GettingInvolved.rst b/llvm/docs/GettingInvolved.rst index f7e1374..4b4b09a 100644 --- a/llvm/docs/GettingInvolved.rst +++ b/llvm/docs/GettingInvolved.rst @@ -213,6 +213,16 @@ what to add to your calendar invite. - `ics <https://calendar.google.com/calendar/ical/c_fe5774fa2769c5085d6b87e8fac272e8940e7d0089bc0e0a58dc3ead7978504b%40group.calendar.google.com/public/basic.ics>`__ `gcal <https://calendar.google.com/calendar/embed?src=c_fe5774fa2769c5085d6b87e8fac272e8940e7d0089bc0e0a58dc3ead7978504b%40group.calendar.google.com&ctz=Asia%2FTokyo>`__ - `Minutes/docs <https://discourse.llvm.org/t/llvm-qualification-wg-sync-ups-meeting-minutes/87148>`__ + * - MLIR C/C++ Frontend Working Group + - Monthly, usually 1st Monday of the month + - `ics <https://calendar.google.com/calendar/ical/jvceakm3kbpku3f4jrsv1lkigo%40group.calendar.google.com/public/basic.ics>`__ + `gcal <https://calendar.google.com/calendar/embed?src=jvceakm3kbpku3f4jrsv1lkigo%40group.calendar.google.com&ctz=America%2FLos_Angeles>`__ + - `Minutes/docs <https://docs.google.com/document/d/1-flHK3TjQUrkSO2Fdt4webZ2zCyeXxpTLMiRQbMW7hE>`__ + * - ClangIR Upstreaming Coordination Meeting + - Every 2 weeks on Mondays + - `ics <https://calendar.google.com/calendar/ical/c_673c6cd64474c0aff173bf8fa609559f93d654e0984d9d91d71abd32d28c0486%40group.calendar.google.com/public/basic.ics>`__ + `gcal <https://calendar.google.com/calendar/embed?src=c_673c6cd64474c0aff173bf8fa609559f93d654e0984d9d91d71abd32d28c0486%40group.calendar.google.com&ctz=America%2FLos_Angeles>`__ + - For event owners, our Discord bot also supports sending automated announcements diff --git a/llvm/include/llvm/ADT/IntervalTree.h b/llvm/include/llvm/ADT/IntervalTree.h index 918c862..d14de06 100644 --- a/llvm/include/llvm/ADT/IntervalTree.h +++ b/llvm/include/llvm/ADT/IntervalTree.h @@ -236,8 +236,7 @@ public: //===----------------------------------------------------------------------===// // Helper class 
template that is used by the IntervalTree to ensure that one // does instantiate using only fundamental and/or pointer types. -template <typename T> -using PointTypeIsValid = std::bool_constant<std::is_fundamental<T>::value>; +template <typename T> using PointTypeIsValid = std::is_fundamental<T>; template <typename T> using ValueTypeIsValid = std::bool_constant<std::is_fundamental<T>::value || diff --git a/llvm/include/llvm/ADT/SmallPtrSet.h b/llvm/include/llvm/ADT/SmallPtrSet.h index e24cd641..f588a77 100644 --- a/llvm/include/llvm/ADT/SmallPtrSet.h +++ b/llvm/include/llvm/ADT/SmallPtrSet.h @@ -476,18 +476,20 @@ public: } [[nodiscard]] iterator begin() const { - if (shouldReverseIterate()) + if constexpr (shouldReverseIterate()) return makeIterator(EndPointer() - 1); - return makeIterator(CurArray); + else + return makeIterator(CurArray); } [[nodiscard]] iterator end() const { return makeIterator(EndPointer()); } private: /// Create an iterator that dereferences to same place as the given pointer. iterator makeIterator(const void *const *P) const { - if (shouldReverseIterate()) + if constexpr (shouldReverseIterate()) return iterator(P == EndPointer() ? 
CurArray : P + 1, CurArray, *this); - return iterator(P, EndPointer(), *this); + else + return iterator(P, EndPointer(), *this); } }; diff --git a/llvm/include/llvm/Analysis/IR2Vec.h b/llvm/include/llvm/Analysis/IR2Vec.h index 3671c1c..b7c3015 100644 --- a/llvm/include/llvm/Analysis/IR2Vec.h +++ b/llvm/include/llvm/Analysis/IR2Vec.h @@ -36,6 +36,7 @@ #define LLVM_ANALYSIS_IR2VEC_H #include "llvm/ADT/DenseMap.h" +#include "llvm/IR/Instructions.h" #include "llvm/IR/PassManager.h" #include "llvm/IR/Type.h" #include "llvm/Support/CommandLine.h" @@ -44,6 +45,7 @@ #include "llvm/Support/JSON.h" #include <array> #include <map> +#include <optional> namespace llvm { @@ -143,6 +145,73 @@ public: using InstEmbeddingsMap = DenseMap<const Instruction *, Embedding>; using BBEmbeddingsMap = DenseMap<const BasicBlock *, Embedding>; +/// Generic storage class for section-based vocabularies. +/// VocabStorage provides a generic foundation for storing and accessing +/// embeddings organized into sections. 
+class VocabStorage { +private: + /// Section-based storage + std::vector<std::vector<Embedding>> Sections; + + const size_t TotalSize; + const unsigned Dimension; + +public: + /// Default constructor creates empty storage (invalid state) + VocabStorage() : Sections(), TotalSize(0), Dimension(0) {} + + /// Create a VocabStorage with pre-organized section data + VocabStorage(std::vector<std::vector<Embedding>> &&SectionData); + + VocabStorage(VocabStorage &&) = default; + VocabStorage &operator=(VocabStorage &&) = delete; + + VocabStorage(const VocabStorage &) = delete; + VocabStorage &operator=(const VocabStorage &) = delete; + + /// Get total number of entries across all sections + size_t size() const { return TotalSize; } + + /// Get number of sections + unsigned getNumSections() const { + return static_cast<unsigned>(Sections.size()); + } + + /// Section-based access: Storage[sectionId][localIndex] + const std::vector<Embedding> &operator[](unsigned SectionId) const { + assert(SectionId < Sections.size() && "Invalid section ID"); + return Sections[SectionId]; + } + + /// Get vocabulary dimension + unsigned getDimension() const { return Dimension; } + + /// Check if vocabulary is valid (has data) + bool isValid() const { return TotalSize > 0; } + + /// Iterator support for section-based access + class const_iterator { + const VocabStorage *Storage; + unsigned SectionId = 0; + size_t LocalIndex = 0; + + public: + const_iterator(const VocabStorage *Storage, unsigned SectionId, + size_t LocalIndex) + : Storage(Storage), SectionId(SectionId), LocalIndex(LocalIndex) {} + + LLVM_ABI const Embedding &operator*() const; + LLVM_ABI const_iterator &operator++(); + LLVM_ABI bool operator==(const const_iterator &Other) const; + LLVM_ABI bool operator!=(const const_iterator &Other) const; + }; + + const_iterator begin() const { return const_iterator(this, 0, 0); } + const_iterator end() const { + return const_iterator(this, getNumSections(), 0); + } +}; + /// Class for 
storing and accessing the IR2Vec vocabulary. /// The Vocabulary class manages seed embeddings for LLVM IR entities. The /// seed embeddings are the initial learned representations of the entities @@ -162,15 +231,42 @@ using BBEmbeddingsMap = DenseMap<const BasicBlock *, Embedding>; /// embeddings. class Vocabulary { friend class llvm::IR2VecVocabAnalysis; - using VocabVector = std::vector<ir2vec::Embedding>; - VocabVector Vocab; -public: - // Slot layout: - // [0 .. MaxOpcodes-1] => Instruction opcodes - // [MaxOpcodes .. MaxOpcodes+MaxCanonicalTypeIDs-1] => Canonicalized types - // [MaxOpcodes+MaxCanonicalTypeIDs .. NumCanonicalEntries-1] => Operand kinds + // Vocabulary Layout: + // +----------------+------------------------------------------------------+ + // | Entity Type | Index Range | + // +----------------+------------------------------------------------------+ + // | Opcodes | [0 .. (MaxOpcodes-1)] | + // | Canonical Types| [MaxOpcodes .. (MaxOpcodes+MaxCanonicalTypeIDs-1)] | + // | Operands | [(MaxOpcodes+MaxCanonicalTypeIDs) .. NumCanEntries] | + // +----------------+------------------------------------------------------+ + // Note: MaxOpcodes is the number of unique opcodes supported by LLVM IR. + // MaxCanonicalTypeIDs is the number of canonicalized type IDs. + // "Similar" LLVM Types are grouped/canonicalized together. E.g., all + // float variants (FloatTy, DoubleTy, HalfTy, etc.) map to + // CanonicalTypeID::FloatTy. This helps reduce the vocabulary size + // and improves learning. Operands include Comparison predicates + // (ICmp/FCmp) along with other operand types. This can be extended to + // include other specializations in future. 
+ enum class Section : unsigned { + Opcodes = 0, + CanonicalTypes = 1, + Operands = 2, + Predicates = 3, + MaxSections + }; + + // Use section-based storage for better organization and efficiency + VocabStorage Storage; + + static constexpr unsigned NumICmpPredicates = + static_cast<unsigned>(CmpInst::LAST_ICMP_PREDICATE) - + static_cast<unsigned>(CmpInst::FIRST_ICMP_PREDICATE) + 1; + static constexpr unsigned NumFCmpPredicates = + static_cast<unsigned>(CmpInst::LAST_FCMP_PREDICATE) - + static_cast<unsigned>(CmpInst::FIRST_FCMP_PREDICATE) + 1; +public: /// Canonical type IDs supported by IR2Vec Vocabulary enum class CanonicalTypeID : unsigned { FloatTy, @@ -207,59 +303,114 @@ public: static_cast<unsigned>(CanonicalTypeID::MaxCanonicalType); static constexpr unsigned MaxOperandKinds = static_cast<unsigned>(OperandKind::MaxOperandKind); + // CmpInst::Predicate has gaps. We want the vocabulary to be dense without + // empty slots. + static constexpr unsigned MaxPredicateKinds = + NumICmpPredicates + NumFCmpPredicates; Vocabulary() = default; - LLVM_ABI Vocabulary(VocabVector &&Vocab) : Vocab(std::move(Vocab)) {} + LLVM_ABI Vocabulary(VocabStorage &&Storage) : Storage(std::move(Storage)) {} + + Vocabulary(const Vocabulary &) = delete; + Vocabulary &operator=(const Vocabulary &) = delete; + + Vocabulary(Vocabulary &&) = default; + Vocabulary &operator=(Vocabulary &&Other) = delete; + + LLVM_ABI bool isValid() const { + return Storage.size() == NumCanonicalEntries; + } + + LLVM_ABI unsigned getDimension() const { + assert(isValid() && "IR2Vec Vocabulary is invalid"); + return Storage.getDimension(); + } - LLVM_ABI bool isValid() const { return Vocab.size() == NumCanonicalEntries; }; - LLVM_ABI unsigned getDimension() const; - /// Total number of entries (opcodes + canonicalized types + operand kinds) + /// Total number of entries (opcodes + canonicalized types + operand kinds + + /// predicates) static constexpr size_t getCanonicalSize() { return NumCanonicalEntries; } 
/// Function to get vocabulary key for a given Opcode LLVM_ABI static StringRef getVocabKeyForOpcode(unsigned Opcode); /// Function to get vocabulary key for a given TypeID - LLVM_ABI static StringRef getVocabKeyForTypeID(Type::TypeID TypeID); + LLVM_ABI static StringRef getVocabKeyForTypeID(Type::TypeID TypeID) { + return getVocabKeyForCanonicalTypeID(getCanonicalTypeID(TypeID)); + } /// Function to get vocabulary key for a given OperandKind - LLVM_ABI static StringRef getVocabKeyForOperandKind(OperandKind Kind); + LLVM_ABI static StringRef getVocabKeyForOperandKind(OperandKind Kind) { + unsigned Index = static_cast<unsigned>(Kind); + assert(Index < MaxOperandKinds && "Invalid OperandKind"); + return OperandKindNames[Index]; + } /// Function to classify an operand into OperandKind LLVM_ABI static OperandKind getOperandKind(const Value *Op); - /// Functions to return the slot index or position of a given Opcode, TypeID, - /// or OperandKind in the vocabulary. - LLVM_ABI static unsigned getSlotIndex(unsigned Opcode); - LLVM_ABI static unsigned getSlotIndex(Type::TypeID TypeID); - LLVM_ABI static unsigned getSlotIndex(const Value &Op); + /// Function to get vocabulary key for a given predicate + LLVM_ABI static StringRef getVocabKeyForPredicate(CmpInst::Predicate P); + + /// Functions to return flat index + LLVM_ABI static unsigned getIndex(unsigned Opcode) { + assert(Opcode >= 1 && Opcode <= MaxOpcodes && "Invalid opcode"); + return Opcode - 1; // Convert to zero-based index + } + + LLVM_ABI static unsigned getIndex(Type::TypeID TypeID) { + assert(static_cast<unsigned>(TypeID) < MaxTypeIDs && "Invalid type ID"); + return MaxOpcodes + static_cast<unsigned>(getCanonicalTypeID(TypeID)); + } + + LLVM_ABI static unsigned getIndex(const Value &Op) { + unsigned Index = static_cast<unsigned>(getOperandKind(&Op)); + assert(Index < MaxOperandKinds && "Invalid OperandKind"); + return OperandBaseOffset + Index; + } + + LLVM_ABI static unsigned getIndex(CmpInst::Predicate P) { + 
return PredicateBaseOffset + getPredicateLocalIndex(P); + } /// Accessors to get the embedding for a given entity. - LLVM_ABI const ir2vec::Embedding &operator[](unsigned Opcode) const; - LLVM_ABI const ir2vec::Embedding &operator[](Type::TypeID TypeId) const; - LLVM_ABI const ir2vec::Embedding &operator[](const Value &Arg) const; + LLVM_ABI const ir2vec::Embedding &operator[](unsigned Opcode) const { + assert(Opcode >= 1 && Opcode <= MaxOpcodes && "Invalid opcode"); + return Storage[static_cast<unsigned>(Section::Opcodes)][Opcode - 1]; + } + + LLVM_ABI const ir2vec::Embedding &operator[](Type::TypeID TypeID) const { + assert(static_cast<unsigned>(TypeID) < MaxTypeIDs && "Invalid type ID"); + unsigned LocalIndex = static_cast<unsigned>(getCanonicalTypeID(TypeID)); + return Storage[static_cast<unsigned>(Section::CanonicalTypes)][LocalIndex]; + } + + LLVM_ABI const ir2vec::Embedding &operator[](const Value &Arg) const { + unsigned LocalIndex = static_cast<unsigned>(getOperandKind(&Arg)); + assert(LocalIndex < MaxOperandKinds && "Invalid OperandKind"); + return Storage[static_cast<unsigned>(Section::Operands)][LocalIndex]; + } + + LLVM_ABI const ir2vec::Embedding &operator[](CmpInst::Predicate P) const { + unsigned LocalIndex = getPredicateLocalIndex(P); + return Storage[static_cast<unsigned>(Section::Predicates)][LocalIndex]; + } /// Const Iterator type aliases - using const_iterator = VocabVector::const_iterator; + using const_iterator = VocabStorage::const_iterator; + const_iterator begin() const { assert(isValid() && "IR2Vec Vocabulary is invalid"); - return Vocab.begin(); + return Storage.begin(); } - const_iterator cbegin() const { - assert(isValid() && "IR2Vec Vocabulary is invalid"); - return Vocab.cbegin(); - } + const_iterator cbegin() const { return begin(); } const_iterator end() const { assert(isValid() && "IR2Vec Vocabulary is invalid"); - return Vocab.end(); + return Storage.end(); } - const_iterator cend() const { - assert(isValid() && "IR2Vec 
Vocabulary is invalid"); - return Vocab.cend(); - } + const_iterator cend() const { return end(); } /// Returns the string key for a given index position in the vocabulary. /// This is useful for debugging or printing the vocabulary. Do not use this @@ -267,14 +418,24 @@ public: LLVM_ABI static StringRef getStringKey(unsigned Pos); /// Create a dummy vocabulary for testing purposes. - LLVM_ABI static VocabVector createDummyVocabForTest(unsigned Dim = 1); + LLVM_ABI static VocabStorage createDummyVocabForTest(unsigned Dim = 1); LLVM_ABI bool invalidate(Module &M, const PreservedAnalyses &PA, ModuleAnalysisManager::Invalidator &Inv) const; private: constexpr static unsigned NumCanonicalEntries = - MaxOpcodes + MaxCanonicalTypeIDs + MaxOperandKinds; + MaxOpcodes + MaxCanonicalTypeIDs + MaxOperandKinds + MaxPredicateKinds; + + // Base offsets for flat index computation + constexpr static unsigned OperandBaseOffset = + MaxOpcodes + MaxCanonicalTypeIDs; + constexpr static unsigned PredicateBaseOffset = + OperandBaseOffset + MaxOperandKinds; + + /// Functions for predicate index calculations + static unsigned getPredicateLocalIndex(CmpInst::Predicate P); + static CmpInst::Predicate getPredicateFromLocalIndex(unsigned LocalIndex); /// String mappings for CanonicalTypeID values static constexpr StringLiteral CanonicalTypeNames[] = { @@ -322,10 +483,26 @@ private: /// Function to get vocabulary key for canonical type by enum LLVM_ABI static StringRef - getVocabKeyForCanonicalTypeID(CanonicalTypeID CType); + getVocabKeyForCanonicalTypeID(CanonicalTypeID CType) { + unsigned Index = static_cast<unsigned>(CType); + assert(Index < MaxCanonicalTypeIDs && "Invalid CanonicalTypeID"); + return CanonicalTypeNames[Index]; + } /// Function to convert TypeID to CanonicalTypeID - LLVM_ABI static CanonicalTypeID getCanonicalTypeID(Type::TypeID TypeID); + LLVM_ABI static CanonicalTypeID getCanonicalTypeID(Type::TypeID TypeID) { + unsigned Index = static_cast<unsigned>(TypeID); + 
assert(Index < MaxTypeIDs && "Invalid TypeID"); + return TypeIDMapping[Index]; + } + + /// Function to get the predicate enum value for a given index. Index is + /// relative to the predicates section of the vocabulary. E.g., Index 0 + /// corresponds to the first predicate. + LLVM_ABI static CmpInst::Predicate getPredicate(unsigned Index) { + assert(Index < MaxPredicateKinds && "Invalid predicate index"); + return getPredicateFromLocalIndex(Index); + } }; /// Embedder provides the interface to generate embeddings (vector @@ -418,22 +595,22 @@ public: /// mapping between an entity of the IR (like opcode, type, argument, etc.) and /// its corresponding embedding. class IR2VecVocabAnalysis : public AnalysisInfoMixin<IR2VecVocabAnalysis> { - using VocabVector = std::vector<ir2vec::Embedding>; using VocabMap = std::map<std::string, ir2vec::Embedding>; - VocabMap OpcVocab, TypeVocab, ArgVocab; - VocabVector Vocab; + std::optional<ir2vec::VocabStorage> Vocab; - Error readVocabulary(); + Error readVocabulary(VocabMap &OpcVocab, VocabMap &TypeVocab, + VocabMap &ArgVocab); Error parseVocabSection(StringRef Key, const json::Value &ParsedVocabValue, VocabMap &TargetVocab, unsigned &Dim); - void generateNumMappedVocab(); + void generateVocabStorage(VocabMap &OpcVocab, VocabMap &TypeVocab, + VocabMap &ArgVocab); void emitError(Error Err, LLVMContext &Ctx); public: LLVM_ABI static AnalysisKey Key; IR2VecVocabAnalysis() = default; - LLVM_ABI explicit IR2VecVocabAnalysis(const VocabVector &Vocab); - LLVM_ABI explicit IR2VecVocabAnalysis(VocabVector &&Vocab); + LLVM_ABI explicit IR2VecVocabAnalysis(ir2vec::VocabStorage &&Vocab) + : Vocab(std::move(Vocab)) {} using Result = ir2vec::Vocabulary; LLVM_ABI Result run(Module &M, ModuleAnalysisManager &MAM); }; diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h index 7a45ae9..164b46b 100644 --- a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h +++ 
b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h @@ -184,6 +184,7 @@ m_scev_PtrToInt(const Op0_t &Op0) { /// Match a binary SCEV. template <typename SCEVTy, typename Op0_t, typename Op1_t, + SCEV::NoWrapFlags WrapFlags = SCEV::FlagAnyWrap, bool Commutable = false> struct SCEVBinaryExpr_match { Op0_t Op0; @@ -192,6 +193,10 @@ struct SCEVBinaryExpr_match { SCEVBinaryExpr_match(Op0_t Op0, Op1_t Op1) : Op0(Op0), Op1(Op1) {} bool match(const SCEV *S) const { + if (auto WrappingS = dyn_cast<SCEVNAryExpr>(S)) + if (WrappingS->getNoWrapFlags(WrapFlags) != WrapFlags) + return false; + auto *E = dyn_cast<SCEVTy>(S); return E && E->getNumOperands() == 2 && ((Op0.match(E->getOperand(0)) && Op1.match(E->getOperand(1))) || @@ -201,10 +206,12 @@ struct SCEVBinaryExpr_match { }; template <typename SCEVTy, typename Op0_t, typename Op1_t, + SCEV::NoWrapFlags WrapFlags = SCEV::FlagAnyWrap, bool Commutable = false> -inline SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t, Commutable> +inline SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t, WrapFlags, Commutable> m_scev_Binary(const Op0_t &Op0, const Op1_t &Op1) { - return SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t, Commutable>(Op0, Op1); + return SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t, WrapFlags, Commutable>(Op0, + Op1); } template <typename Op0_t, typename Op1_t> @@ -220,9 +227,17 @@ m_scev_Mul(const Op0_t &Op0, const Op1_t &Op1) { } template <typename Op0_t, typename Op1_t> -inline SCEVBinaryExpr_match<SCEVMulExpr, Op0_t, Op1_t, true> +inline SCEVBinaryExpr_match<SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true> m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1) { - return m_scev_Binary<SCEVMulExpr, Op0_t, Op1_t, true>(Op0, Op1); + return m_scev_Binary<SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true>(Op0, + Op1); +} + +template <typename Op0_t, typename Op1_t> +inline SCEVBinaryExpr_match<SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagNUW, true> +m_scev_c_NUWMul(const Op0_t &Op0, const Op1_t &Op1) { + return m_scev_Binary<SCEVMulExpr, 
Op0_t, Op1_t, SCEV::FlagNUW, true>(Op0, + Op1); } template <typename Op0_t, typename Op1_t> diff --git a/llvm/include/llvm/BinaryFormat/DXContainer.h b/llvm/include/llvm/BinaryFormat/DXContainer.h index 08a7ddb..8944e736 100644 --- a/llvm/include/llvm/BinaryFormat/DXContainer.h +++ b/llvm/include/llvm/BinaryFormat/DXContainer.h @@ -844,6 +844,7 @@ struct StaticSampler : public v1::StaticSampler { enum class RootSignatureVersion { V1_0 = 0x1, V1_1 = 0x2, + V1_2 = 0x3, }; } // namespace dxbc diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index c45e03a..7bbad17 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -480,13 +480,6 @@ public: return true; } - /// Return true if the @llvm.vector.partial.reduce.* intrinsic - /// should be expanded using generic code in SelectionDAGBuilder. - virtual bool - shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const { - return true; - } - /// Return true if the @llvm.get.active.lane.mask intrinsic should be expanded /// using generic code in SelectionDAGBuilder. 
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const { diff --git a/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h b/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h index 87777fd..edee6a7 100644 --- a/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h +++ b/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h @@ -56,7 +56,8 @@ struct RootDescriptor { return; } - assert(Version == llvm::dxbc::RootSignatureVersion::V1_1 && + assert((Version == llvm::dxbc::RootSignatureVersion::V1_1 || + Version == llvm::dxbc::RootSignatureVersion::V1_2) && "Specified an invalid root signature version"); switch (Type) { case dxil::ResourceClass::CBuffer: @@ -100,7 +101,8 @@ struct DescriptorTableClause { return; } - assert(Version == dxbc::RootSignatureVersion::V1_1 && + assert((Version == dxbc::RootSignatureVersion::V1_1 || + Version == dxbc::RootSignatureVersion::V1_2) && "Specified an invalid root signature version"); switch (Type) { case dxil::ResourceClass::CBuffer: @@ -131,6 +133,7 @@ struct StaticSampler { float MaxLOD = std::numeric_limits<float>::max(); uint32_t Space = 0; dxbc::ShaderVisibility Visibility = dxbc::ShaderVisibility::All; + dxbc::StaticSamplerFlags Flags = dxbc::StaticSamplerFlags::None; }; /// Models RootElement : RootFlags | RootConstants | RootParam diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h index 95a0a7f..de7a237 100644 --- a/llvm/include/llvm/IR/Instructions.h +++ b/llvm/include/llvm/IR/Instructions.h @@ -32,6 +32,7 @@ #include "llvm/IR/Instruction.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/OperandTraits.h" +#include "llvm/IR/ProfDataUtils.h" #include "llvm/IR/Use.h" #include "llvm/IR/User.h" #include "llvm/Support/AtomicOrdering.h" @@ -3536,8 +3537,6 @@ class SwitchInstProfUpdateWrapper { bool Changed = false; protected: - LLVM_ABI MDNode *buildProfBranchWeightsMD(); - LLVM_ABI void init(); public: @@ -3549,8 +3548,8 @@ public: SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) 
{ init(); } ~SwitchInstProfUpdateWrapper() { - if (Changed) - SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD()); + if (Changed && Weights.has_value() && Weights->size() >= 2) + setBranchWeights(SI, Weights.value(), /*IsExpected=*/false); } /// Delegate the call to the underlying SwitchInst::removeCase() and remove diff --git a/llvm/include/llvm/IR/ProfDataUtils.h b/llvm/include/llvm/IR/ProfDataUtils.h index e97160e..a0876b1 100644 --- a/llvm/include/llvm/IR/ProfDataUtils.h +++ b/llvm/include/llvm/IR/ProfDataUtils.h @@ -145,7 +145,13 @@ LLVM_ABI bool extractProfTotalWeight(const Instruction &I, /// \param Weights an array of weights to set on instruction I. /// \param IsExpected were these weights added from an llvm.expect* intrinsic. LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef<uint32_t> Weights, - bool IsExpected); + bool IsExpected, bool ElideAllZero = false); + +/// Variant of `setBranchWeights` where the `Weights` will be fit first to +/// uint32_t by shifting right. +LLVM_ABI void setFittedBranchWeights(Instruction &I, ArrayRef<uint64_t> Weights, + bool IsExpected, + bool ElideAllZero = false); /// downscale the given weights preserving the ratio. 
If the maximum value is /// not already known and not provided via \param KnownMaxCount , it will be diff --git a/llvm/include/llvm/IR/ValueMap.h b/llvm/include/llvm/IR/ValueMap.h index 1a11718..97653c2 100644 --- a/llvm/include/llvm/IR/ValueMap.h +++ b/llvm/include/llvm/IR/ValueMap.h @@ -42,18 +42,15 @@ namespace llvm { -template<typename KeyT, typename ValueT, typename Config> +template <typename KeyT, typename ValueT, typename Config> class ValueMapCallbackVH; -template<typename DenseMapT, typename KeyT> -class ValueMapIterator; -template<typename DenseMapT, typename KeyT> -class ValueMapConstIterator; +template <typename DenseMapT, typename KeyT> class ValueMapIterator; +template <typename DenseMapT, typename KeyT> class ValueMapConstIterator; /// This class defines the default behavior for configurable aspects of /// ValueMap<>. User Configs should inherit from this class to be as compatible /// as possible with future versions of ValueMap. -template<typename KeyT, typename MutexT = sys::Mutex> -struct ValueMapConfig { +template <typename KeyT, typename MutexT = sys::Mutex> struct ValueMapConfig { using mutex_type = MutexT; /// If FollowRAUW is true, the ValueMap will update mappings on RAUW. If it's @@ -66,21 +63,24 @@ struct ValueMapConfig { // override all the defaults. struct ExtraData {}; - template<typename ExtraDataT> + template <typename ExtraDataT> static void onRAUW(const ExtraDataT & /*Data*/, KeyT /*Old*/, KeyT /*New*/) {} - template<typename ExtraDataT> - static void onDelete(const ExtraDataT &/*Data*/, KeyT /*Old*/) {} + template <typename ExtraDataT> + static void onDelete(const ExtraDataT & /*Data*/, KeyT /*Old*/) {} /// Returns a mutex that should be acquired around any changes to the map. /// This is only acquired from the CallbackVH (and held around calls to onRAUW /// and onDelete) and not inside other ValueMap methods. NULL means that no /// mutex is necessary. 
- template<typename ExtraDataT> - static mutex_type *getMutex(const ExtraDataT &/*Data*/) { return nullptr; } + template <typename ExtraDataT> + static mutex_type *getMutex(const ExtraDataT & /*Data*/) { + return nullptr; + } }; /// See the file comment. -template<typename KeyT, typename ValueT, typename Config =ValueMapConfig<KeyT>> +template <typename KeyT, typename ValueT, + typename Config = ValueMapConfig<KeyT>> class ValueMap { friend class ValueMapCallbackVH<KeyT, ValueT, Config>; @@ -157,9 +157,7 @@ public: return Map.find_as(Val) == Map.end() ? 0 : 1; } - iterator find(const KeyT &Val) { - return iterator(Map.find_as(Val)); - } + iterator find(const KeyT &Val) { return iterator(Map.find_as(Val)); } const_iterator find(const KeyT &Val) const { return const_iterator(Map.find_as(Val)); } @@ -186,8 +184,7 @@ public: } /// insert - Range insertion of pairs. - template<typename InputIt> - void insert(InputIt I, InputIt E) { + template <typename InputIt> void insert(InputIt I, InputIt E) { for (; I != E; ++I) insert(*I); } @@ -200,17 +197,13 @@ public: Map.erase(I); return true; } - void erase(iterator I) { - return Map.erase(I.base()); - } + void erase(iterator I) { return Map.erase(I.base()); } - value_type& FindAndConstruct(const KeyT &Key) { + value_type &FindAndConstruct(const KeyT &Key) { return Map.FindAndConstruct(Wrap(Key)); } - ValueT &operator[](const KeyT &Key) { - return Map[Wrap(Key)]; - } + ValueT &operator[](const KeyT &Key) { return Map[Wrap(Key)]; } /// isPointerIntoBucketsArray - Return true if the specified pointer points /// somewhere into the ValueMap's array of buckets (i.e. either to a key or @@ -235,7 +228,7 @@ private: // the const_cast incorrect) is if it gets inserted into the map. But then // this function must have been called from a non-const method, making the // const_cast ok. 
- return ValueMapCVH(key, const_cast<ValueMap*>(this)); + return ValueMapCVH(key, const_cast<ValueMap *>(this)); } }; @@ -252,7 +245,7 @@ class ValueMapCallbackVH final : public CallbackVH { ValueMapT *Map; ValueMapCallbackVH(KeyT Key, ValueMapT *Map) - : CallbackVH(const_cast<Value*>(static_cast<const Value*>(Key))), + : CallbackVH(const_cast<Value *>(static_cast<const Value *>(Key))), Map(Map) {} // Private constructor used to create empty/tombstone DenseMap keys. @@ -268,8 +261,8 @@ public: std::unique_lock<typename Config::mutex_type> Guard; if (M) Guard = std::unique_lock<typename Config::mutex_type>(*M); - Config::onDelete(Copy.Map->Data, Copy.Unwrap()); // May destroy *this. - Copy.Map->Map.erase(Copy); // Definitely destroys *this. + Config::onDelete(Copy.Map->Data, Copy.Unwrap()); // May destroy *this. + Copy.Map->Map.erase(Copy); // Definitely destroys *this. } void allUsesReplacedWith(Value *new_key) override { @@ -291,14 +284,14 @@ public: // removed the old mapping. if (I != Copy.Map->Map.end()) { ValueT Target(std::move(I->second)); - Copy.Map->Map.erase(I); // Definitely destroys *this. + Copy.Map->Map.erase(I); // Definitely destroys *this. 
Copy.Map->insert(std::make_pair(typed_new_key, std::move(Target))); } } } }; -template<typename KeyT, typename ValueT, typename Config> +template <typename KeyT, typename ValueT, typename Config> struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config>> { using VH = ValueMapCallbackVH<KeyT, ValueT, Config>; @@ -318,9 +311,7 @@ struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config>> { return DenseMapInfo<KeyT>::getHashValue(Val); } - static bool isEqual(const VH &LHS, const VH &RHS) { - return LHS == RHS; - } + static bool isEqual(const VH &LHS, const VH &RHS) { return LHS == RHS; } static bool isEqual(const KeyT &LHS, const VH &RHS) { return LHS == RHS.getValPtr(); @@ -347,7 +338,7 @@ public: struct ValueTypeProxy { const KeyT first; - ValueT& second; + ValueT &second; ValueTypeProxy *operator->() { return this; } @@ -361,23 +352,19 @@ public: return Result; } - ValueTypeProxy operator->() const { - return operator*(); - } + ValueTypeProxy operator->() const { return operator*(); } - bool operator==(const ValueMapIterator &RHS) const { - return I == RHS.I; - } - bool operator!=(const ValueMapIterator &RHS) const { - return I != RHS.I; - } + bool operator==(const ValueMapIterator &RHS) const { return I == RHS.I; } + bool operator!=(const ValueMapIterator &RHS) const { return I != RHS.I; } - inline ValueMapIterator& operator++() { // Preincrement + inline ValueMapIterator &operator++() { // Preincrement ++I; return *this; } - ValueMapIterator operator++(int) { // Postincrement - ValueMapIterator tmp = *this; ++*this; return tmp; + ValueMapIterator operator++(int) { // Postincrement + ValueMapIterator tmp = *this; + ++*this; + return tmp; } }; @@ -397,13 +384,13 @@ public: ValueMapConstIterator() : I() {} ValueMapConstIterator(BaseT I) : I(I) {} ValueMapConstIterator(ValueMapIterator<DenseMapT, KeyT> Other) - : I(Other.base()) {} + : I(Other.base()) {} BaseT base() const { return I; } struct ValueTypeProxy { const KeyT first; - const ValueT& second; + const 
ValueT &second; ValueTypeProxy *operator->() { return this; } operator std::pair<KeyT, ValueT>() const { return std::make_pair(first, second); @@ -415,23 +402,19 @@ public: return Result; } - ValueTypeProxy operator->() const { - return operator*(); - } + ValueTypeProxy operator->() const { return operator*(); } - bool operator==(const ValueMapConstIterator &RHS) const { - return I == RHS.I; - } - bool operator!=(const ValueMapConstIterator &RHS) const { - return I != RHS.I; - } + bool operator==(const ValueMapConstIterator &RHS) const { return I == RHS.I; } + bool operator!=(const ValueMapConstIterator &RHS) const { return I != RHS.I; } - inline ValueMapConstIterator& operator++() { // Preincrement + inline ValueMapConstIterator &operator++() { // Preincrement ++I; return *this; } - ValueMapConstIterator operator++(int) { // Postincrement - ValueMapConstIterator tmp = *this; ++*this; return tmp; + ValueMapConstIterator operator++(int) { // Postincrement + ValueMapConstIterator tmp = *this; + ++*this; + return tmp; } }; diff --git a/llvm/include/llvm/Support/FileSystem.h b/llvm/include/llvm/Support/FileSystem.h index c203779..cf2a810 100644 --- a/llvm/include/llvm/Support/FileSystem.h +++ b/llvm/include/llvm/Support/FileSystem.h @@ -268,18 +268,6 @@ public: /// Make \a path an absolute path. /// -/// Makes \a path absolute using the \a current_directory if it is not already. -/// An empty \a path will result in the \a current_directory. -/// -/// /absolute/path => /absolute/path -/// relative/../path => <current-directory>/relative/../path -/// -/// @param path A path that is modified to be an absolute path. -LLVM_ABI void make_absolute(const Twine ¤t_directory, - SmallVectorImpl<char> &path); - -/// Make \a path an absolute path. -/// /// Makes \a path absolute using the current directory if it is not already. An /// empty \a path will result in the current directory. 
/// diff --git a/llvm/include/llvm/Support/Format.h b/llvm/include/llvm/Support/Format.h index 2553002..34b224d 100644 --- a/llvm/include/llvm/Support/Format.h +++ b/llvm/include/llvm/Support/Format.h @@ -78,16 +78,6 @@ public: /// printed, this synthesizes the string into a temporary buffer provided and /// returns whether or not it is big enough. -// Helper to validate that format() parameters are scalars or pointers. -template <typename... Args> struct validate_format_parameters; -template <typename Arg, typename... Args> -struct validate_format_parameters<Arg, Args...> { - static_assert(std::is_scalar_v<Arg>, - "format can't be used with non fundamental / non pointer type"); - validate_format_parameters() { validate_format_parameters<Args...>(); } -}; -template <> struct validate_format_parameters<> {}; - template <typename... Ts> class format_object final : public format_object_base { std::tuple<Ts...> Vals; @@ -105,7 +95,9 @@ class format_object final : public format_object_base { public: format_object(const char *fmt, const Ts &... vals) : format_object_base(fmt), Vals(vals...) 
{ - validate_format_parameters<Ts...>(); + static_assert( + (std::is_scalar_v<Ts> && ...), + "format can't be used with non fundamental / non pointer type"); } int snprint(char *Buffer, unsigned BufferSize) const override { diff --git a/llvm/include/llvm/Support/FormatProviders.h b/llvm/include/llvm/Support/FormatProviders.h index 9147782..8eaa5e38 100644 --- a/llvm/include/llvm/Support/FormatProviders.h +++ b/llvm/include/llvm/Support/FormatProviders.h @@ -29,22 +29,18 @@ namespace support { namespace detail { template <typename T> struct use_integral_formatter - : public std::bool_constant< - is_one_of<T, uint8_t, int16_t, uint16_t, int32_t, uint32_t, int64_t, - uint64_t, int, unsigned, long, unsigned long, long long, - unsigned long long>::value> {}; + : public is_one_of<T, uint8_t, int16_t, uint16_t, int32_t, uint32_t, + int64_t, uint64_t, int, unsigned, long, unsigned long, + long long, unsigned long long> {}; template <typename T> -struct use_char_formatter : public std::bool_constant<std::is_same_v<T, char>> { -}; +struct use_char_formatter : public std::is_same<T, char> {}; template <typename T> -struct is_cstring - : public std::bool_constant<is_one_of<T, char *, const char *>::value> {}; +struct is_cstring : public is_one_of<T, char *, const char *> {}; template <typename T> -struct use_string_formatter - : public std::bool_constant<std::is_convertible_v<T, llvm::StringRef>> {}; +struct use_string_formatter : public std::is_convertible<T, llvm::StringRef> {}; template <typename T> struct use_pointer_formatter @@ -52,8 +48,7 @@ struct use_pointer_formatter }; template <typename T> -struct use_double_formatter - : public std::bool_constant<std::is_floating_point_v<T>> {}; +struct use_double_formatter : public std::is_floating_point<T> {}; class HelperFunctions { protected: diff --git a/llvm/include/llvm/Support/FormatVariadicDetails.h b/llvm/include/llvm/Support/FormatVariadicDetails.h index 4002caf..0fdc7b6 100644 --- 
a/llvm/include/llvm/Support/FormatVariadicDetails.h +++ b/llvm/include/llvm/Support/FormatVariadicDetails.h @@ -92,8 +92,7 @@ public: // based format() invocation. template <typename T> struct uses_format_member - : public std::bool_constant< - std::is_base_of_v<format_adapter, std::remove_reference_t<T>>> {}; + : public std::is_base_of<format_adapter, std::remove_reference_t<T>> {}; // Simple template that decides whether a type T should use the format_provider // based format() invocation. The member function takes priority, so this test diff --git a/llvm/include/llvm/Support/HashBuilder.h b/llvm/include/llvm/Support/HashBuilder.h index ae266d3..d0130d6 100644 --- a/llvm/include/llvm/Support/HashBuilder.h +++ b/llvm/include/llvm/Support/HashBuilder.h @@ -31,8 +31,7 @@ namespace llvm { namespace hashbuilder_detail { /// Trait to indicate whether a type's bits can be hashed directly (after /// endianness correction). -template <typename U> -struct IsHashableData : std::bool_constant<is_integral_or_enum<U>::value> {}; +template <typename U> struct IsHashableData : is_integral_or_enum<U> {}; } // namespace hashbuilder_detail diff --git a/llvm/include/llvm/Support/InstructionCost.h b/llvm/include/llvm/Support/InstructionCost.h index ab1c8eb..507c166 100644 --- a/llvm/include/llvm/Support/InstructionCost.h +++ b/llvm/include/llvm/Support/InstructionCost.h @@ -59,8 +59,8 @@ private: State = Invalid; } - static CostType getMaxValue() { return std::numeric_limits<CostType>::max(); } - static CostType getMinValue() { return std::numeric_limits<CostType>::min(); } + static constexpr CostType MaxValue = std::numeric_limits<CostType>::max(); + static constexpr CostType MinValue = std::numeric_limits<CostType>::min(); public: // A default constructed InstructionCost is a valid zero cost @@ -69,8 +69,8 @@ public: InstructionCost(CostState) = delete; InstructionCost(CostType Val) : Value(Val), State(Valid) {} - static InstructionCost getMax() { return getMaxValue(); } - static 
InstructionCost getMin() { return getMinValue(); } + static InstructionCost getMax() { return MaxValue; } + static InstructionCost getMin() { return MinValue; } static InstructionCost getInvalid(CostType Val = 0) { InstructionCost Tmp(Val); Tmp.setInvalid(); @@ -102,7 +102,7 @@ public: // Saturating addition. InstructionCost::CostType Result; if (AddOverflow(Value, RHS.Value, Result)) - Result = RHS.Value > 0 ? getMaxValue() : getMinValue(); + Result = RHS.Value > 0 ? MaxValue : MinValue; Value = Result; return *this; @@ -120,7 +120,7 @@ public: // Saturating subtract. InstructionCost::CostType Result; if (SubOverflow(Value, RHS.Value, Result)) - Result = RHS.Value > 0 ? getMinValue() : getMaxValue(); + Result = RHS.Value > 0 ? MinValue : MaxValue; Value = Result; return *this; } @@ -138,9 +138,9 @@ public: InstructionCost::CostType Result; if (MulOverflow(Value, RHS.Value, Result)) { if ((Value > 0 && RHS.Value > 0) || (Value < 0 && RHS.Value < 0)) - Result = getMaxValue(); + Result = MaxValue; else - Result = getMinValue(); + Result = MinValue; } Value = Result; diff --git a/llvm/include/llvm/Support/Path.h b/llvm/include/llvm/Support/Path.h index 0cb5171..a8e0f33 100644 --- a/llvm/include/llvm/Support/Path.h +++ b/llvm/include/llvm/Support/Path.h @@ -566,6 +566,18 @@ LLVM_ABI bool is_absolute_gnu(const Twine &path, Style style = Style::native); /// @result True if the path is relative, false if it is not. LLVM_ABI bool is_relative(const Twine &path, Style style = Style::native); +/// Make \a path an absolute path. +/// +/// Makes \a path absolute using the \a current_directory if it is not already. +/// An empty \a path will result in the \a current_directory. +/// +/// /absolute/path => /absolute/path +/// relative/../path => <current-directory>/relative/../path +/// +/// @param path A path that is modified to be an absolute path. 
+LLVM_ABI void make_absolute(const Twine ¤t_directory, + SmallVectorImpl<char> &path); + } // end namespace path } // end namespace sys } // end namespace llvm diff --git a/llvm/include/llvm/Target/TargetMachine.h b/llvm/include/llvm/Target/TargetMachine.h index bf4e490..d0fd483 100644 --- a/llvm/include/llvm/Target/TargetMachine.h +++ b/llvm/include/llvm/Target/TargetMachine.h @@ -29,10 +29,10 @@ #include <string> #include <utility> -LLVM_ABI extern llvm::cl::opt<bool> NoKernelInfoEndLTO; - namespace llvm { +LLVM_ABI extern llvm::cl::opt<bool> NoKernelInfoEndLTO; + class AAManager; using ModulePassManager = PassManager<Module>; diff --git a/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h b/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h index 6178622..dfd6e2f 100644 --- a/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h +++ b/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h @@ -15,7 +15,12 @@ namespace llvm { class Function; -struct JumpTableToSwitchPass : PassInfoMixin<JumpTableToSwitchPass> { +class JumpTableToSwitchPass : public PassInfoMixin<JumpTableToSwitchPass> { + // Necessary until we switch to GUIDs as metadata, after which we can drop it. + const bool InLTO; + +public: + explicit JumpTableToSwitchPass(bool InLTO = false) : InLTO(InLTO) {} /// Run the pass over the function. 
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); }; diff --git a/llvm/lib/Analysis/CtxProfAnalysis.cpp b/llvm/lib/Analysis/CtxProfAnalysis.cpp index a363bce..c4abec0 100644 --- a/llvm/lib/Analysis/CtxProfAnalysis.cpp +++ b/llvm/lib/Analysis/CtxProfAnalysis.cpp @@ -30,6 +30,9 @@ #define DEBUG_TYPE "ctx_prof" using namespace llvm; + +namespace llvm { + cl::opt<std::string> UseCtxProfile("use-ctx-profile", cl::init(""), cl::Hidden, cl::desc("Use the specified contextual profile file")); @@ -50,7 +53,6 @@ static cl::opt<bool> ForceIsInSpecializedModule( const char *AssignGUIDPass::GUIDMetadataName = "guid"; -namespace llvm { class ProfileAnnotatorImpl final { friend class ProfileAnnotator; class BBInfo; diff --git a/llvm/lib/Analysis/IR2Vec.cpp b/llvm/lib/Analysis/IR2Vec.cpp index 99afc06..af30422 100644 --- a/llvm/lib/Analysis/IR2Vec.cpp +++ b/llvm/lib/Analysis/IR2Vec.cpp @@ -15,6 +15,7 @@ #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/Sequence.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Module.h" @@ -216,6 +217,8 @@ void SymbolicEmbedder::computeEmbeddings(const BasicBlock &BB) const { ArgEmb += Vocab[*Op]; auto InstVector = Vocab[I.getOpcode()] + Vocab[I.getType()->getTypeID()] + ArgEmb; + if (const auto *IC = dyn_cast<CmpInst>(&I)) + InstVector += Vocab[IC->getPredicate()]; InstVecMap[&I] = InstVector; BBVector += InstVector; } @@ -250,6 +253,9 @@ void FlowAwareEmbedder::computeEmbeddings(const BasicBlock &BB) const { // embeddings auto InstVector = Vocab[I.getOpcode()] + Vocab[I.getType()->getTypeID()] + ArgEmb; + // Add compare predicate embedding as an additional operand if applicable + if (const auto *IC = dyn_cast<CmpInst>(&I)) + InstVector += Vocab[IC->getPredicate()]; InstVecMap[&I] = InstVector; BBVector += InstVector; } @@ -257,41 +263,76 @@ void FlowAwareEmbedder::computeEmbeddings(const BasicBlock &BB) const { } // 
==----------------------------------------------------------------------===// -// Vocabulary +// VocabStorage //===----------------------------------------------------------------------===// -unsigned Vocabulary::getDimension() const { - assert(isValid() && "IR2Vec Vocabulary is invalid"); - return Vocab[0].size(); -} - -unsigned Vocabulary::getSlotIndex(unsigned Opcode) { - assert(Opcode >= 1 && Opcode <= MaxOpcodes && "Invalid opcode"); - return Opcode - 1; // Convert to zero-based index -} - -unsigned Vocabulary::getSlotIndex(Type::TypeID TypeID) { - assert(static_cast<unsigned>(TypeID) < MaxTypeIDs && "Invalid type ID"); - return MaxOpcodes + static_cast<unsigned>(getCanonicalTypeID(TypeID)); -} - -unsigned Vocabulary::getSlotIndex(const Value &Op) { - unsigned Index = static_cast<unsigned>(getOperandKind(&Op)); - assert(Index < MaxOperandKinds && "Invalid OperandKind"); - return MaxOpcodes + MaxCanonicalTypeIDs + Index; +VocabStorage::VocabStorage(std::vector<std::vector<Embedding>> &&SectionData) + : Sections(std::move(SectionData)), TotalSize([&] { + assert(!Sections.empty() && "Vocabulary has no sections"); + // Compute total size across all sections + size_t Size = 0; + for (const auto &Section : Sections) { + assert(!Section.empty() && "Vocabulary section is empty"); + Size += Section.size(); + } + return Size; + }()), + Dimension([&] { + // Get dimension from the first embedding in the first section - all + // embeddings must have the same dimension + assert(!Sections.empty() && "Vocabulary has no sections"); + assert(!Sections[0].empty() && "First section of vocabulary is empty"); + unsigned ExpectedDim = static_cast<unsigned>(Sections[0][0].size()); + + // Verify that all embeddings across all sections have the same + // dimension + [[maybe_unused]] auto allSameDim = + [ExpectedDim](const std::vector<Embedding> &Section) { + return std::all_of(Section.begin(), Section.end(), + [ExpectedDim](const Embedding &Emb) { + return Emb.size() == ExpectedDim; + 
}); + }; + assert(std::all_of(Sections.begin(), Sections.end(), allSameDim) && + "All embeddings must have the same dimension"); + + return ExpectedDim; + }()) {} + +const Embedding &VocabStorage::const_iterator::operator*() const { + assert(SectionId < Storage->Sections.size() && "Invalid section ID"); + assert(LocalIndex < Storage->Sections[SectionId].size() && + "Local index out of range"); + return Storage->Sections[SectionId][LocalIndex]; +} + +VocabStorage::const_iterator &VocabStorage::const_iterator::operator++() { + ++LocalIndex; + // Check if we need to move to the next section + if (SectionId < Storage->getNumSections() && + LocalIndex >= Storage->Sections[SectionId].size()) { + assert(LocalIndex == Storage->Sections[SectionId].size() && + "Local index should be at the end of the current section"); + LocalIndex = 0; + ++SectionId; + } + return *this; } -const Embedding &Vocabulary::operator[](unsigned Opcode) const { - return Vocab[getSlotIndex(Opcode)]; +bool VocabStorage::const_iterator::operator==( + const const_iterator &Other) const { + return Storage == Other.Storage && SectionId == Other.SectionId && + LocalIndex == Other.LocalIndex; } -const Embedding &Vocabulary::operator[](Type::TypeID TypeID) const { - return Vocab[getSlotIndex(TypeID)]; +bool VocabStorage::const_iterator::operator!=( + const const_iterator &Other) const { + return !(*this == Other); } -const ir2vec::Embedding &Vocabulary::operator[](const Value &Arg) const { - return Vocab[getSlotIndex(Arg)]; -} +// ==----------------------------------------------------------------------===// +// Vocabulary +//===----------------------------------------------------------------------===// StringRef Vocabulary::getVocabKeyForOpcode(unsigned Opcode) { assert(Opcode >= 1 && Opcode <= MaxOpcodes && "Invalid opcode"); @@ -304,29 +345,6 @@ StringRef Vocabulary::getVocabKeyForOpcode(unsigned Opcode) { return "UnknownOpcode"; } -StringRef Vocabulary::getVocabKeyForCanonicalTypeID(CanonicalTypeID 
CType) { - unsigned Index = static_cast<unsigned>(CType); - assert(Index < MaxCanonicalTypeIDs && "Invalid CanonicalTypeID"); - return CanonicalTypeNames[Index]; -} - -Vocabulary::CanonicalTypeID -Vocabulary::getCanonicalTypeID(Type::TypeID TypeID) { - unsigned Index = static_cast<unsigned>(TypeID); - assert(Index < MaxTypeIDs && "Invalid TypeID"); - return TypeIDMapping[Index]; -} - -StringRef Vocabulary::getVocabKeyForTypeID(Type::TypeID TypeID) { - return getVocabKeyForCanonicalTypeID(getCanonicalTypeID(TypeID)); -} - -StringRef Vocabulary::getVocabKeyForOperandKind(Vocabulary::OperandKind Kind) { - unsigned Index = static_cast<unsigned>(Kind); - assert(Index < MaxOperandKinds && "Invalid OperandKind"); - return OperandKindNames[Index]; -} - // Helper function to classify an operand into OperandKind Vocabulary::OperandKind Vocabulary::getOperandKind(const Value *Op) { if (isa<Function>(Op)) @@ -338,18 +356,50 @@ Vocabulary::OperandKind Vocabulary::getOperandKind(const Value *Op) { return OperandKind::VariableID; } +unsigned Vocabulary::getPredicateLocalIndex(CmpInst::Predicate P) { + if (P >= CmpInst::FIRST_FCMP_PREDICATE && P <= CmpInst::LAST_FCMP_PREDICATE) + return P - CmpInst::FIRST_FCMP_PREDICATE; + else + return P - CmpInst::FIRST_ICMP_PREDICATE + + (CmpInst::LAST_FCMP_PREDICATE - CmpInst::FIRST_FCMP_PREDICATE + 1); +} + +CmpInst::Predicate Vocabulary::getPredicateFromLocalIndex(unsigned LocalIndex) { + unsigned fcmpRange = + CmpInst::LAST_FCMP_PREDICATE - CmpInst::FIRST_FCMP_PREDICATE + 1; + if (LocalIndex < fcmpRange) + return static_cast<CmpInst::Predicate>(CmpInst::FIRST_FCMP_PREDICATE + + LocalIndex); + else + return static_cast<CmpInst::Predicate>(CmpInst::FIRST_ICMP_PREDICATE + + LocalIndex - fcmpRange); +} + +StringRef Vocabulary::getVocabKeyForPredicate(CmpInst::Predicate Pred) { + static SmallString<16> PredNameBuffer; + if (Pred < CmpInst::FIRST_ICMP_PREDICATE) + PredNameBuffer = "FCMP_"; + else + PredNameBuffer = "ICMP_"; + PredNameBuffer += 
CmpInst::getPredicateName(Pred); + return PredNameBuffer; +} + StringRef Vocabulary::getStringKey(unsigned Pos) { assert(Pos < NumCanonicalEntries && "Position out of bounds in vocabulary"); // Opcode if (Pos < MaxOpcodes) return getVocabKeyForOpcode(Pos + 1); // Type - if (Pos < MaxOpcodes + MaxCanonicalTypeIDs) + if (Pos < OperandBaseOffset) return getVocabKeyForCanonicalTypeID( static_cast<CanonicalTypeID>(Pos - MaxOpcodes)); // Operand - return getVocabKeyForOperandKind( - static_cast<OperandKind>(Pos - MaxOpcodes - MaxCanonicalTypeIDs)); + if (Pos < PredicateBaseOffset) + return getVocabKeyForOperandKind( + static_cast<OperandKind>(Pos - OperandBaseOffset)); + // Predicates + return getVocabKeyForPredicate(getPredicate(Pos - PredicateBaseOffset)); } // For now, assume vocabulary is stable unless explicitly invalidated. @@ -359,19 +409,51 @@ bool Vocabulary::invalidate(Module &M, const PreservedAnalyses &PA, return !(PAC.preservedWhenStateless()); } -Vocabulary::VocabVector Vocabulary::createDummyVocabForTest(unsigned Dim) { - VocabVector DummyVocab; - DummyVocab.reserve(NumCanonicalEntries); +VocabStorage Vocabulary::createDummyVocabForTest(unsigned Dim) { float DummyVal = 0.1f; - // Create a dummy vocabulary with entries for all opcodes, types, and - // operands - for ([[maybe_unused]] unsigned _ : - seq(0u, Vocabulary::MaxOpcodes + Vocabulary::MaxCanonicalTypeIDs + - Vocabulary::MaxOperandKinds)) { - DummyVocab.push_back(Embedding(Dim, DummyVal)); + + // Create sections for opcodes, types, operands, and predicates + // Order must match Vocabulary::Section enum + std::vector<std::vector<Embedding>> Sections; + Sections.reserve(4); + + // Opcodes section + std::vector<Embedding> OpcodeSec; + OpcodeSec.reserve(MaxOpcodes); + for (unsigned I = 0; I < MaxOpcodes; ++I) { + OpcodeSec.emplace_back(Dim, DummyVal); DummyVal += 0.1f; } - return DummyVocab; + Sections.push_back(std::move(OpcodeSec)); + + // Types section + std::vector<Embedding> TypeSec; + 
TypeSec.reserve(MaxCanonicalTypeIDs); + for (unsigned I = 0; I < MaxCanonicalTypeIDs; ++I) { + TypeSec.emplace_back(Dim, DummyVal); + DummyVal += 0.1f; + } + Sections.push_back(std::move(TypeSec)); + + // Operands section + std::vector<Embedding> OperandSec; + OperandSec.reserve(MaxOperandKinds); + for (unsigned I = 0; I < MaxOperandKinds; ++I) { + OperandSec.emplace_back(Dim, DummyVal); + DummyVal += 0.1f; + } + Sections.push_back(std::move(OperandSec)); + + // Predicates section + std::vector<Embedding> PredicateSec; + PredicateSec.reserve(MaxPredicateKinds); + for (unsigned I = 0; I < MaxPredicateKinds; ++I) { + PredicateSec.emplace_back(Dim, DummyVal); + DummyVal += 0.1f; + } + Sections.push_back(std::move(PredicateSec)); + + return VocabStorage(std::move(Sections)); } // ==----------------------------------------------------------------------===// @@ -417,7 +499,9 @@ Error IR2VecVocabAnalysis::parseVocabSection( // FIXME: Make this optional. We can avoid file reads // by auto-generating a default vocabulary during the build time. -Error IR2VecVocabAnalysis::readVocabulary() { +Error IR2VecVocabAnalysis::readVocabulary(VocabMap &OpcVocab, + VocabMap &TypeVocab, + VocabMap &ArgVocab) { auto BufOrError = MemoryBuffer::getFileOrSTDIN(VocabFile, /*IsText=*/true); if (!BufOrError) return createFileError(VocabFile, BufOrError.getError()); @@ -448,7 +532,9 @@ Error IR2VecVocabAnalysis::readVocabulary() { return Error::success(); } -void IR2VecVocabAnalysis::generateNumMappedVocab() { +void IR2VecVocabAnalysis::generateVocabStorage(VocabMap &OpcVocab, + VocabMap &TypeVocab, + VocabMap &ArgVocab) { // Helper for handling missing entities in the vocabulary. // Currently, we use a zero vector. 
In the future, we will throw an error to @@ -466,7 +552,6 @@ void IR2VecVocabAnalysis::generateNumMappedVocab() { // Handle Opcodes std::vector<Embedding> NumericOpcodeEmbeddings(Vocabulary::MaxOpcodes, Embedding(Dim)); - NumericOpcodeEmbeddings.reserve(Vocabulary::MaxOpcodes); for (unsigned Opcode : seq(0u, Vocabulary::MaxOpcodes)) { StringRef VocabKey = Vocabulary::getVocabKeyForOpcode(Opcode + 1); auto It = OpcVocab.find(VocabKey.str()); @@ -475,13 +560,10 @@ void IR2VecVocabAnalysis::generateNumMappedVocab() { else handleMissingEntity(VocabKey.str()); } - Vocab.insert(Vocab.end(), NumericOpcodeEmbeddings.begin(), - NumericOpcodeEmbeddings.end()); // Handle Types - only canonical types are present in vocabulary std::vector<Embedding> NumericTypeEmbeddings(Vocabulary::MaxCanonicalTypeIDs, Embedding(Dim)); - NumericTypeEmbeddings.reserve(Vocabulary::MaxCanonicalTypeIDs); for (unsigned CTypeID : seq(0u, Vocabulary::MaxCanonicalTypeIDs)) { StringRef VocabKey = Vocabulary::getVocabKeyForCanonicalTypeID( static_cast<Vocabulary::CanonicalTypeID>(CTypeID)); @@ -491,13 +573,10 @@ void IR2VecVocabAnalysis::generateNumMappedVocab() { } handleMissingEntity(VocabKey.str()); } - Vocab.insert(Vocab.end(), NumericTypeEmbeddings.begin(), - NumericTypeEmbeddings.end()); // Handle Arguments/Operands std::vector<Embedding> NumericArgEmbeddings(Vocabulary::MaxOperandKinds, Embedding(Dim)); - NumericArgEmbeddings.reserve(Vocabulary::MaxOperandKinds); for (unsigned OpKind : seq(0u, Vocabulary::MaxOperandKinds)) { Vocabulary::OperandKind Kind = static_cast<Vocabulary::OperandKind>(OpKind); StringRef VocabKey = Vocabulary::getVocabKeyForOperandKind(Kind); @@ -508,15 +587,37 @@ void IR2VecVocabAnalysis::generateNumMappedVocab() { } handleMissingEntity(VocabKey.str()); } - Vocab.insert(Vocab.end(), NumericArgEmbeddings.begin(), - NumericArgEmbeddings.end()); -} -IR2VecVocabAnalysis::IR2VecVocabAnalysis(const VocabVector &Vocab) - : Vocab(Vocab) {} + // Handle Predicates: part of Operands 
section. We look up predicate keys + // in ArgVocab. + std::vector<Embedding> NumericPredEmbeddings(Vocabulary::MaxPredicateKinds, + Embedding(Dim, 0)); + for (unsigned PK : seq(0u, Vocabulary::MaxPredicateKinds)) { + StringRef VocabKey = + Vocabulary::getVocabKeyForPredicate(Vocabulary::getPredicate(PK)); + auto It = ArgVocab.find(VocabKey.str()); + if (It != ArgVocab.end()) { + NumericPredEmbeddings[PK] = It->second; + continue; + } + handleMissingEntity(VocabKey.str()); + } + + // Create section-based storage instead of flat vocabulary + // Order must match Vocabulary::Section enum + std::vector<std::vector<Embedding>> Sections(4); + Sections[static_cast<unsigned>(Vocabulary::Section::Opcodes)] = + std::move(NumericOpcodeEmbeddings); // Section::Opcodes + Sections[static_cast<unsigned>(Vocabulary::Section::CanonicalTypes)] = + std::move(NumericTypeEmbeddings); // Section::CanonicalTypes + Sections[static_cast<unsigned>(Vocabulary::Section::Operands)] = + std::move(NumericArgEmbeddings); // Section::Operands + Sections[static_cast<unsigned>(Vocabulary::Section::Predicates)] = + std::move(NumericPredEmbeddings); // Section::Predicates -IR2VecVocabAnalysis::IR2VecVocabAnalysis(VocabVector &&Vocab) - : Vocab(std::move(Vocab)) {} + // Create VocabStorage from organized sections + Vocab.emplace(std::move(Sections)); +} void IR2VecVocabAnalysis::emitError(Error Err, LLVMContext &Ctx) { handleAllErrors(std::move(Err), [&](const ErrorInfoBase &EI) { @@ -528,8 +629,8 @@ IR2VecVocabAnalysis::Result IR2VecVocabAnalysis::run(Module &M, ModuleAnalysisManager &AM) { auto Ctx = &M.getContext(); // If vocabulary is already populated by the constructor, use it. - if (!Vocab.empty()) - return Vocabulary(std::move(Vocab)); + if (Vocab.has_value()) + return Vocabulary(std::move(Vocab.value())); // Otherwise, try to read from the vocabulary file. 
if (VocabFile.empty()) { @@ -538,7 +639,9 @@ IR2VecVocabAnalysis::run(Module &M, ModuleAnalysisManager &AM) { "set it using --ir2vec-vocab-path"); return Vocabulary(); // Return invalid result } - if (auto Err = readVocabulary()) { + + VocabMap OpcVocab, TypeVocab, ArgVocab; + if (auto Err = readVocabulary(OpcVocab, TypeVocab, ArgVocab)) { emitError(std::move(Err), *Ctx); return Vocabulary(); } @@ -553,9 +656,9 @@ IR2VecVocabAnalysis::run(Module &M, ModuleAnalysisManager &AM) { scaleVocabSection(ArgVocab, ArgWeight); // Generate the numeric lookup vocabulary - generateNumMappedVocab(); + generateVocabStorage(OpcVocab, TypeVocab, ArgVocab); - return Vocabulary(std::move(Vocab)); + return Vocabulary(std::move(Vocab.value())); } // ==----------------------------------------------------------------------===// @@ -564,7 +667,7 @@ IR2VecVocabAnalysis::run(Module &M, ModuleAnalysisManager &AM) { PreservedAnalyses IR2VecPrinterPass::run(Module &M, ModuleAnalysisManager &MAM) { - auto Vocabulary = MAM.getResult<IR2VecVocabAnalysis>(M); + auto &Vocabulary = MAM.getResult<IR2VecVocabAnalysis>(M); assert(Vocabulary.isValid() && "IR2Vec Vocabulary is invalid"); for (Function &F : M) { @@ -606,7 +709,7 @@ PreservedAnalyses IR2VecPrinterPass::run(Module &M, PreservedAnalyses IR2VecVocabPrinterPass::run(Module &M, ModuleAnalysisManager &MAM) { - auto IR2VecVocabulary = MAM.getResult<IR2VecVocabAnalysis>(M); + auto &IR2VecVocabulary = MAM.getResult<IR2VecVocabAnalysis>(M); assert(IR2VecVocabulary.isValid() && "IR2Vec Vocabulary is invalid"); // Print each entry diff --git a/llvm/lib/Analysis/IndirectCallPromotionAnalysis.cpp b/llvm/lib/Analysis/IndirectCallPromotionAnalysis.cpp index 7b93474..25e7a97 100644 --- a/llvm/lib/Analysis/IndirectCallPromotionAnalysis.cpp +++ b/llvm/lib/Analysis/IndirectCallPromotionAnalysis.cpp @@ -22,6 +22,8 @@ using namespace llvm; #define DEBUG_TYPE "pgo-icall-prom-analysis" +namespace llvm { + // The percent threshold for the direct-call target (this 
call site vs the // remaining call count) for it to be considered as the promotion target. static cl::opt<unsigned> ICPRemainingPercentThreshold( @@ -54,6 +56,8 @@ cl::opt<unsigned> MaxNumVTableAnnotations( "icp-max-num-vtables", cl::init(6), cl::Hidden, cl::desc("Max number of vtables annotated for a vtable load instruction.")); +} // end namespace llvm + bool ICallPromotionAnalysis::isPromotionProfitable(uint64_t Count, uint64_t TotalCount, uint64_t RemainingCount) { diff --git a/llvm/lib/Analysis/InlineAdvisor.cpp b/llvm/lib/Analysis/InlineAdvisor.cpp index 28b14c2..0fa804f 100644 --- a/llvm/lib/Analysis/InlineAdvisor.cpp +++ b/llvm/lib/Analysis/InlineAdvisor.cpp @@ -217,7 +217,7 @@ AnalysisKey PluginInlineAdvisorAnalysis::Key; bool InlineAdvisorAnalysis::initializeIR2VecVocabIfRequested( Module &M, ModuleAnalysisManager &MAM) { if (!IR2VecVocabFile.empty()) { - auto IR2VecVocabResult = MAM.getResult<IR2VecVocabAnalysis>(M); + auto &IR2VecVocabResult = MAM.getResult<IR2VecVocabAnalysis>(M); if (!IR2VecVocabResult.isValid()) { M.getContext().emitError("Failed to load IR2Vec vocabulary"); return false; diff --git a/llvm/lib/Analysis/MemoryProfileInfo.cpp b/llvm/lib/Analysis/MemoryProfileInfo.cpp index b5ca6b1..11602d2 100644 --- a/llvm/lib/Analysis/MemoryProfileInfo.cpp +++ b/llvm/lib/Analysis/MemoryProfileInfo.cpp @@ -22,6 +22,8 @@ using namespace llvm::memprof; #define DEBUG_TYPE "memory-profile-info" +namespace llvm { + cl::opt<bool> MemProfReportHintedSizes( "memprof-report-hinted-sizes", cl::init(false), cl::Hidden, cl::desc("Report total allocation sizes of hinted allocations")); @@ -52,6 +54,8 @@ cl::opt<unsigned> MinPercentMaxColdSize( "memprof-min-percent-max-cold-size", cl::init(100), cl::Hidden, cl::desc("Min percent of max cold bytes for critical cold context")); +} // end namespace llvm + bool llvm::memprof::metadataIncludesAllContextSizeInfo() { return MemProfReportHintedSizes || MinClonedColdBytePercent < 100; } diff --git 
a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp index a317ac4..a60a4bb 100644 --- a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp +++ b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp @@ -67,7 +67,6 @@ using namespace llvm::memprof; namespace llvm { FunctionSummary::ForceSummaryHotnessType ForceSummaryEdgesCold = FunctionSummary::FSHT_None; -} // namespace llvm static cl::opt<FunctionSummary::ForceSummaryHotnessType, true> FSEC( "force-summary-edges-cold", cl::Hidden, cl::location(ForceSummaryEdgesCold), @@ -91,6 +90,7 @@ LLVM_ABI extern cl::opt<bool> ScalePartialSampleProfileWorkingSetSize; extern cl::opt<unsigned> MaxNumVTableAnnotations; extern cl::opt<bool> MemProfReportHintedSizes; +} // namespace llvm // Walk through the operands of a given User via worklist iteration and populate // the set of GlobalValue references encountered. Invoked either on an diff --git a/llvm/lib/Analysis/ProfileSummaryInfo.cpp b/llvm/lib/Analysis/ProfileSummaryInfo.cpp index f1c3155..44d7a17 100644 --- a/llvm/lib/Analysis/ProfileSummaryInfo.cpp +++ b/llvm/lib/Analysis/ProfileSummaryInfo.cpp @@ -24,6 +24,8 @@ #include <optional> using namespace llvm; +namespace llvm { + static cl::opt<bool> PartialProfile( "partial-profile", cl::Hidden, cl::init(false), cl::desc("Specify the current profile is used as a partial profile.")); @@ -44,6 +46,8 @@ static cl::opt<double> PartialSampleProfileWorkingSetSizeScaleFactor( "and the factor to scale the working set size to use the same " "shared thresholds as PGO.")); +} // end namespace llvm + // The profile summary metadata may be attached either by the frontend or by // any backend passes (IR level instrumentation, for example). 
This method // checks if the Summary is null and if so checks if the summary metadata is now diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp index b08399b..63e1b14 100644 --- a/llvm/lib/Analysis/ScalarEvolution.cpp +++ b/llvm/lib/Analysis/ScalarEvolution.cpp @@ -3598,6 +3598,13 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, } } + // TODO: Generalize to handle any common factors. + // udiv (mul nuw a, vscale), (mul nuw b, vscale) --> udiv a, b + const SCEV *NewLHS, *NewRHS; + if (match(LHS, m_scev_c_NUWMul(m_SCEV(NewLHS), m_SCEVVScale())) && + match(RHS, m_scev_c_NUWMul(m_SCEV(NewRHS), m_SCEVVScale()))) + return getUDivExpr(NewLHS, NewRHS); + // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs // changes). Make sure we get a new one. IP = nullptr; diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp index 6f11b25..09a8fbe 100644 --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -7651,25 +7651,26 @@ static bool isGuaranteedNotToBeUndefOrPoison( return true; } - if (const auto *PN = dyn_cast<PHINode>(V)) { - unsigned Num = PN->getNumIncomingValues(); - bool IsWellDefined = true; - for (unsigned i = 0; i < Num; ++i) { - if (PN == PN->getIncomingValue(i)) - continue; - auto *TI = PN->getIncomingBlock(i)->getTerminator(); - if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI, - DT, Depth + 1, Kind)) { - IsWellDefined = false; - break; + if (!::canCreateUndefOrPoison(Opr, Kind, + /*ConsiderFlagsAndMetadata=*/true)) { + if (const auto *PN = dyn_cast<PHINode>(V)) { + unsigned Num = PN->getNumIncomingValues(); + bool IsWellDefined = true; + for (unsigned i = 0; i < Num; ++i) { + if (PN == PN->getIncomingValue(i)) + continue; + auto *TI = PN->getIncomingBlock(i)->getTerminator(); + if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI, + DT, Depth + 1, Kind)) { + IsWellDefined = false; + break; + 
} } - } - if (IsWellDefined) + if (IsWellDefined) + return true; + } else if (all_of(Opr->operands(), OpCheck)) return true; - } else if (!::canCreateUndefOrPoison(Opr, Kind, - /*ConsiderFlagsAndMetadata*/ true) && - all_of(Opr->operands(), OpCheck)) - return true; + } } if (auto *I = dyn_cast<LoadInst>(V)) diff --git a/llvm/lib/CGData/CodeGenData.cpp b/llvm/lib/CGData/CodeGenData.cpp index b4f08c3..7900dc7 100644 --- a/llvm/lib/CGData/CodeGenData.cpp +++ b/llvm/lib/CGData/CodeGenData.cpp @@ -31,11 +31,14 @@ static cl::opt<bool> static cl::opt<std::string> CodeGenDataUsePath("codegen-data-use-path", cl::init(""), cl::Hidden, cl::desc("File path to where .cgdata file is read")); + +namespace llvm { cl::opt<bool> CodeGenDataThinLTOTwoRounds( "codegen-data-thinlto-two-rounds", cl::init(false), cl::Hidden, cl::desc("Enable two-round ThinLTO code generation. The first round " "emits codegen data, while the second round uses the emitted " "codegen data for further optimizations.")); +} // end namespace llvm static std::string getCGDataErrString(cgdata_error Err, const std::string &ErrMsg = "") { diff --git a/llvm/lib/CGData/CodeGenDataReader.cpp b/llvm/lib/CGData/CodeGenDataReader.cpp index 3fd8cfe..b1cd939 100644 --- a/llvm/lib/CGData/CodeGenDataReader.cpp +++ b/llvm/lib/CGData/CodeGenDataReader.cpp @@ -26,14 +26,14 @@ static cl::opt<bool> IndexedCodeGenDataReadFunctionMapNames( "disabled to save memory and time for final consumption of the " "indexed CodeGenData in production.")); +namespace llvm { + cl::opt<bool> IndexedCodeGenDataLazyLoading( "indexed-codegen-data-lazy-loading", cl::init(false), cl::Hidden, cl::desc( "Lazily load indexed CodeGenData. Enable to save memory and time " "for final consumption of the indexed CodeGenData in production.")); -namespace llvm { - static Expected<std::unique_ptr<MemoryBuffer>> setupMemoryBuffer(const Twine &Filename, vfs::FileSystem &FS) { auto BufferOrErr = Filename.str() == "-" ? 
MemoryBuffer::getSTDIN() diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index 701a6a2..11efe49 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -473,11 +473,9 @@ bool AsmPrinter::doInitialization(Module &M) { AddrLabelSymbols = nullptr; // Initialize TargetLoweringObjectFile. - const_cast<TargetLoweringObjectFile&>(getObjFileLowering()) - .Initialize(OutContext, TM); + TM.getObjFileLowering()->Initialize(OutContext, TM); - const_cast<TargetLoweringObjectFile &>(getObjFileLowering()) - .getModuleMetadata(M); + TM.getObjFileLowering()->getModuleMetadata(M); // On AIX, we delay emitting any section information until // after emitting the .file pseudo-op. This allows additional diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp index 477e5c1..c2d474f 100644 --- a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp @@ -34,7 +34,7 @@ cl::opt<bool> llvm::DisableGISelLegalityCheck( cl::desc("Don't verify that MIR is fully legal between GlobalISel passes"), cl::Hidden); -cl::opt<bool> VerboseVerifyLegalizerInfo( +static cl::opt<bool> VerboseVerifyLegalizerInfo( "verbose-gisel-verify-legalizer-info", cl::desc("Print more information to dbgs about GlobalISel legalizer rules " "being verified"), diff --git a/llvm/lib/CodeGen/MachineRegionInfo.cpp b/llvm/lib/CodeGen/MachineRegionInfo.cpp index f8268b8..366755a 100644 --- a/llvm/lib/CodeGen/MachineRegionInfo.cpp +++ b/llvm/lib/CodeGen/MachineRegionInfo.cpp @@ -10,6 +10,7 @@ #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/RegionInfoImpl.h" #include "llvm/CodeGen/MachinePostDominators.h" +#include "llvm/CodeGen/Passes.h" #include "llvm/Config/llvm-config.h" #include "llvm/InitializePasses.h" #include "llvm/Pass.h" @@ -127,7 +128,7 @@ LLVM_DUMP_METHOD void MachineRegionInfoPass::dump() const { #endif char 
MachineRegionInfoPass::ID = 0; -char &MachineRegionInfoPassID = MachineRegionInfoPass::ID; +char &llvm::MachineRegionInfoPassID = MachineRegionInfoPass::ID; INITIALIZE_PASS_BEGIN(MachineRegionInfoPass, DEBUG_TYPE, "Detect single entry single exit regions", true, true) diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp index 729a57e..e1d39d6 100644 --- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp +++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp @@ -1929,7 +1929,27 @@ ValueTrackerResult ValueTracker::getNextSourceFromCopy() { const MachineOperand &Src = Def->getOperand(1); if (Src.isUndef()) return ValueTrackerResult(); - return ValueTrackerResult(Src.getReg(), Src.getSubReg()); + + Register SrcReg = Src.getReg(); + unsigned SubReg = Src.getSubReg(); + if (DefSubReg) { + const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo(); + SubReg = TRI->composeSubRegIndices(SubReg, DefSubReg); + + if (SrcReg.isVirtual()) { + // TODO: Try constraining on rewrite if we can + const TargetRegisterClass *RegRC = MRI.getRegClass(SrcReg); + const TargetRegisterClass *SrcWithSubRC = + TRI->getSubClassWithSubReg(RegRC, SubReg); + if (RegRC != SrcWithSubRC) + return ValueTrackerResult(); + } else { + if (!TRI->getSubReg(SrcReg, SubReg)) + return ValueTrackerResult(); + } + } + + return ValueTrackerResult(SrcReg, SubReg); } ValueTrackerResult ValueTracker::getNextSourceFromBitcast() { diff --git a/llvm/lib/CodeGen/RegAllocScore.cpp b/llvm/lib/CodeGen/RegAllocScore.cpp index 9c9cc1f..280946b 100644 --- a/llvm/lib/CodeGen/RegAllocScore.cpp +++ b/llvm/lib/CodeGen/RegAllocScore.cpp @@ -23,6 +23,8 @@ #include "llvm/Support/CommandLine.h" using namespace llvm; + +namespace llvm { LLVM_ABI cl::opt<double> CopyWeight("regalloc-copy-weight", cl::init(0.2), cl::Hidden); LLVM_ABI cl::opt<double> LoadWeight("regalloc-load-weight", cl::init(4.0), @@ -33,6 +35,8 @@ LLVM_ABI cl::opt<double> CheapRematWeight("regalloc-cheap-remat-weight", cl::init(0.2), cl::Hidden); 
LLVM_ABI cl::opt<double> ExpensiveRematWeight("regalloc-expensive-remat-weight", cl::init(1.0), cl::Hidden); +} // end namespace llvm + #define DEBUG_TYPE "regalloc-score" RegAllocScore &RegAllocScore::operator+=(const RegAllocScore &Other) { diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp index 7ac1aef..ebfea8e 100644 --- a/llvm/lib/CodeGen/RegisterCoalescer.cpp +++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp @@ -584,14 +584,14 @@ bool CoalescerPair::isCoalescable(const MachineInstr *MI) const { return DstReg == Dst; // This is a partial register copy. Check that the parts match. return Register(TRI.getSubReg(DstReg, SrcSub)) == Dst; - } else { - // DstReg is virtual. - if (DstReg != Dst) - return false; - // Registers match, do the subregisters line up? - return TRI.composeSubRegIndices(SrcIdx, SrcSub) == - TRI.composeSubRegIndices(DstIdx, DstSub); } + + // DstReg is virtual. + if (DstReg != Dst) + return false; + // Registers match, do the subregisters line up? + return TRI.composeSubRegIndices(SrcIdx, SrcSub) == + TRI.composeSubRegIndices(DstIdx, DstSub); } void RegisterCoalescerLegacy::getAnalysisUsage(AnalysisUsage &AU) const { @@ -2914,8 +2914,7 @@ JoinVals::ConflictResolution JoinVals::analyzeValue(unsigned ValNo, if ((V.ValidLanes & OtherV.ValidLanes).any()) // Overlapping lanes can't be resolved. return CR_Impossible; - else - return CR_Merge; + return CR_Merge; } // No simultaneous def. Is Other live at the def? 
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index b5201a3..c21890a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -8103,10 +8103,6 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, return; } case Intrinsic::vector_partial_reduce_add: { - if (!TLI.shouldExpandPartialReductionIntrinsic(cast<IntrinsicInst>(&I))) { - visitTargetIntrinsic(I, Intrinsic); - return; - } SDValue Acc = getValue(I.getOperand(0)); SDValue Input = getValue(I.getOperand(1)); setValue(&I, diff --git a/llvm/lib/DebugInfo/LogicalView/Core/LVReader.cpp b/llvm/lib/DebugInfo/LogicalView/Core/LVReader.cpp index c1017d8..d973a47 100644 --- a/llvm/lib/DebugInfo/LogicalView/Core/LVReader.cpp +++ b/llvm/lib/DebugInfo/LogicalView/Core/LVReader.cpp @@ -148,7 +148,7 @@ std::error_code LVSplitContext::open(std::string ContextName, return std::error_code(); } -LVReader *CurrentReader = nullptr; +static LVReader *CurrentReader = nullptr; LVReader &LVReader::getInstance() { if (CurrentReader) return *CurrentReader; diff --git a/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp b/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp index 92c62b8..2b33e56 100644 --- a/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp +++ b/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp @@ -113,6 +113,13 @@ static raw_ostream &operator<<(raw_ostream &OS, return OS; } +static raw_ostream &operator<<(raw_ostream &OS, + const llvm::dxbc::StaticSamplerFlags &Flags) { + printFlags(OS, Flags, dxbc::getStaticSamplerFlags()); + + return OS; +} + raw_ostream &operator<<(raw_ostream &OS, const dxbc::RootFlags &Flags) { OS << "RootFlags("; printFlags(OS, Flags, dxbc::getRootFlags()); @@ -172,7 +179,7 @@ raw_ostream &operator<<(raw_ostream &OS, const StaticSampler &Sampler) { << ", borderColor = " << Sampler.BorderColor << ", minLOD = " << Sampler.MinLOD << ", maxLOD = " << Sampler.MaxLOD 
<< ", space = " << Sampler.Space << ", visibility = " << Sampler.Visibility - << ")"; + << ", flags = " << Sampler.Flags << ")"; return OS; } diff --git a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp index 5785505..7a0cf40 100644 --- a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp +++ b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp @@ -218,6 +218,7 @@ MDNode *MetadataBuilder::BuildStaticSampler(const StaticSampler &Sampler) { ConstantAsMetadata::get(Builder.getInt32(Sampler.Space)), ConstantAsMetadata::get( Builder.getInt32(to_underlying(Sampler.Visibility))), + ConstantAsMetadata::get(Builder.getInt32(to_underlying(Sampler.Flags))), }; return MDNode::get(Ctx, Operands); } @@ -417,7 +418,7 @@ Error MetadataParser::parseDescriptorTable(mcdxbc::RootSignatureDesc &RSD, Error MetadataParser::parseStaticSampler(mcdxbc::RootSignatureDesc &RSD, MDNode *StaticSamplerNode) { - if (StaticSamplerNode->getNumOperands() != 14) + if (StaticSamplerNode->getNumOperands() != 15) return make_error<InvalidRSMetadataFormat>("Static Sampler"); mcdxbc::StaticSampler Sampler; @@ -501,6 +502,17 @@ Error MetadataParser::parseStaticSampler(mcdxbc::RootSignatureDesc &RSD, return Error(std::move(E)); Sampler.ShaderVisibility = *Visibility; + if (RSD.Version < 3) { + RSD.StaticSamplers.push_back(Sampler); + return Error::success(); + } + assert(RSD.Version >= 3); + + if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 14)) + Sampler.Flags = *Val; + else + return make_error<InvalidRSMetadataValue>("Static Sampler Flags"); + RSD.StaticSamplers.push_back(Sampler); return Error::success(); } diff --git a/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp b/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp index 2c78d62..8a2b03d 100644 --- a/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp +++ b/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp @@ -40,7 +40,7 @@ bool verifyRootDescriptorFlag(uint32_t Version, 
uint32_t FlagsVal) { if (Version == 1) return Flags == FlagT::DataVolatile; - assert(Version == 2 && "Provided invalid root signature version"); + assert((Version <= 3) && "Provided invalid root signature version"); // The data-specific flags are mutually exclusive. FlagT DataFlags = FlagT::DataVolatile | FlagT::DataStatic | diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp index e29179b..245129f 100644 --- a/llvm/lib/IR/AsmWriter.cpp +++ b/llvm/lib/IR/AsmWriter.cpp @@ -127,7 +127,7 @@ static void orderValue(const Value *V, OrderMap &OM) { if (OM.lookup(V)) return; - if (const Constant *C = dyn_cast<Constant>(V)) { + if (const auto *C = dyn_cast<Constant>(V)) { if (isa<ConstantData>(C)) return; @@ -146,17 +146,17 @@ static void orderValue(const Value *V, OrderMap &OM) { static OrderMap orderModule(const Module *M) { OrderMap OM; - auto orderConstantValue = [&OM](const Value *V) { + auto OrderConstantValue = [&OM](const Value *V) { if (isa<Constant>(V) || isa<InlineAsm>(V)) orderValue(V, OM); }; auto OrderConstantFromMetadata = [&](Metadata *MD) { if (const auto *VAM = dyn_cast<ValueAsMetadata>(MD)) { - orderConstantValue(VAM->getValue()); + OrderConstantValue(VAM->getValue()); } else if (const auto *AL = dyn_cast<DIArgList>(MD)) { for (const auto *VAM : AL->getArgs()) - orderConstantValue(VAM->getValue()); + OrderConstantValue(VAM->getValue()); } }; @@ -302,18 +302,18 @@ static UseListOrderMap predictUseListOrder(const Module *M) { } static const Module *getModuleFromVal(const Value *V) { - if (const Argument *MA = dyn_cast<Argument>(V)) + if (const auto *MA = dyn_cast<Argument>(V)) return MA->getParent() ? MA->getParent()->getParent() : nullptr; - if (const BasicBlock *BB = dyn_cast<BasicBlock>(V)) + if (const auto *BB = dyn_cast<BasicBlock>(V)) return BB->getParent() ? 
BB->getParent()->getParent() : nullptr; - if (const Instruction *I = dyn_cast<Instruction>(V)) { + if (const auto *I = dyn_cast<Instruction>(V)) { const Function *M = I->getParent() ? I->getParent()->getParent() : nullptr; return M ? M->getParent() : nullptr; } - if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) + if (const auto *GV = dyn_cast<GlobalValue>(V)) return GV->getParent(); if (const auto *MAV = dyn_cast<MetadataAsValue>(V)) { @@ -337,7 +337,7 @@ static const Module *getModuleFromDPI(const DbgRecord *DR) { return DR->getMarker() ? getModuleFromDPI(DR->getMarker()) : nullptr; } -static void PrintCallingConv(unsigned cc, raw_ostream &Out) { +static void printCallingConv(unsigned cc, raw_ostream &Out) { switch (cc) { default: Out << "cc" << cc; break; case CallingConv::Fast: Out << "fastcc"; break; @@ -484,7 +484,7 @@ void llvm::printLLVMNameWithoutPrefix(raw_ostream &OS, StringRef Name) { /// Turn the specified name into an 'LLVM name', which is either prefixed with % /// (if the string only contains simple characters) or is surrounded with ""'s /// (if it has special chars in it). Print it out. -static void PrintLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) { +static void printLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) { switch (Prefix) { case NoPrefix: break; @@ -506,12 +506,12 @@ static void PrintLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) { /// Turn the specified name into an 'LLVM name', which is either prefixed with % /// (if the string only contains simple characters) or is surrounded with ""'s /// (if it has special chars in it). Print it out. -static void PrintLLVMName(raw_ostream &OS, const Value *V) { - PrintLLVMName(OS, V->getName(), +static void printLLVMName(raw_ostream &OS, const Value *V) { + printLLVMName(OS, V->getName(), isa<GlobalValue>(V) ? 
GlobalPrefix : LocalPrefix); } -static void PrintShuffleMask(raw_ostream &Out, Type *Ty, ArrayRef<int> Mask) { +static void printShuffleMask(raw_ostream &Out, Type *Ty, ArrayRef<int> Mask) { Out << ", <"; if (isa<ScalableVectorType>(Ty)) Out << "vscale x "; @@ -668,7 +668,7 @@ void TypePrinting::print(Type *Ty, raw_ostream &OS) { return printStructBody(STy, OS); if (!STy->getName().empty()) - return PrintLLVMName(OS, STy->getName(), LocalPrefix); + return printLLVMName(OS, STy->getName(), LocalPrefix); incorporateTypes(); const auto I = Type2Number.find(STy); @@ -999,26 +999,26 @@ void ModuleSlotTracker::setProcessHook( } static SlotTracker *createSlotTracker(const Value *V) { - if (const Argument *FA = dyn_cast<Argument>(V)) + if (const auto *FA = dyn_cast<Argument>(V)) return new SlotTracker(FA->getParent()); - if (const Instruction *I = dyn_cast<Instruction>(V)) + if (const auto *I = dyn_cast<Instruction>(V)) if (I->getParent()) return new SlotTracker(I->getParent()->getParent()); - if (const BasicBlock *BB = dyn_cast<BasicBlock>(V)) + if (const auto *BB = dyn_cast<BasicBlock>(V)) return new SlotTracker(BB->getParent()); - if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) + if (const auto *GV = dyn_cast<GlobalVariable>(V)) return new SlotTracker(GV->getParent()); - if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) + if (const auto *GA = dyn_cast<GlobalAlias>(V)) return new SlotTracker(GA->getParent()); - if (const GlobalIFunc *GIF = dyn_cast<GlobalIFunc>(V)) + if (const auto *GIF = dyn_cast<GlobalIFunc>(V)) return new SlotTracker(GIF->getParent()); - if (const Function *Func = dyn_cast<Function>(V)) + if (const auto *Func = dyn_cast<Function>(V)) return new SlotTracker(Func); return nullptr; @@ -1218,7 +1218,7 @@ void SlotTracker::processDbgRecordMetadata(const DbgRecord &DR) { // but we can have faulty metadata from debug-intrinsic days being // autoupgraded into debug records. 
This gets caught by the verifier, which // then will print the faulty IR, hitting this code path. - if (const DbgVariableRecord *DVR = dyn_cast<const DbgVariableRecord>(&DR)) { + if (const auto *DVR = dyn_cast<const DbgVariableRecord>(&DR)) { // Process metadata used by DbgRecords; we only specifically care about the // DILocalVariable, DILocation, and DIAssignID fields, as the Value and // Expression fields should only be printed inline and so do not use a slot. @@ -1233,7 +1233,7 @@ void SlotTracker::processDbgRecordMetadata(const DbgRecord &DR) { if (auto *Empty = dyn_cast_if_present<MDNode>(DVR->getRawAddress())) CreateMetadataSlot(Empty); } - } else if (const DbgLabelRecord *DLR = dyn_cast<const DbgLabelRecord>(&DR)) { + } else if (const auto *DLR = dyn_cast<const DbgLabelRecord>(&DR)) { CreateMetadataSlot(DLR->getRawLabel()); } else { llvm_unreachable("unsupported DbgRecord kind"); @@ -1244,12 +1244,12 @@ void SlotTracker::processDbgRecordMetadata(const DbgRecord &DR) { void SlotTracker::processInstructionMetadata(const Instruction &I) { // Process metadata used directly by intrinsics. - if (const CallInst *CI = dyn_cast<CallInst>(&I)) + if (const auto *CI = dyn_cast<CallInst>(&I)) if (Function *F = CI->getCalledFunction()) if (F->isIntrinsic()) for (auto &Op : I.operands()) if (auto *V = dyn_cast_or_null<MetadataAsValue>(Op)) - if (MDNode *N = dyn_cast<MDNode>(V->getMetadata())) + if (auto *N = dyn_cast<MDNode>(V->getMetadata())) CreateMetadataSlot(N); // Process metadata attached to this instruction. @@ -1406,7 +1406,7 @@ void SlotTracker::CreateMetadataSlot(const MDNode *N) { // Recursively add any MDNodes referenced by operands. 
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) - if (const MDNode *Op = dyn_cast_or_null<MDNode>(N->getOperand(i))) + if (const auto *Op = dyn_cast_or_null<MDNode>(N->getOperand(i))) CreateMetadataSlot(Op); } @@ -1464,33 +1464,30 @@ struct AsmWriterContext { // AsmWriter Implementation //===----------------------------------------------------------------------===// -static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, +static void writeAsOperandInternal(raw_ostream &Out, const Value *V, AsmWriterContext &WriterCtx, bool PrintType = false); -static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD, +static void writeAsOperandInternal(raw_ostream &Out, const Metadata *MD, AsmWriterContext &WriterCtx, bool FromValue = false); -static void WriteOptimizationInfo(raw_ostream &Out, const User *U) { - if (const FPMathOperator *FPO = dyn_cast<const FPMathOperator>(U)) +static void writeOptimizationInfo(raw_ostream &Out, const User *U) { + if (const auto *FPO = dyn_cast<const FPMathOperator>(U)) Out << FPO->getFastMathFlags(); - if (const OverflowingBinaryOperator *OBO = - dyn_cast<OverflowingBinaryOperator>(U)) { + if (const auto *OBO = dyn_cast<OverflowingBinaryOperator>(U)) { if (OBO->hasNoUnsignedWrap()) Out << " nuw"; if (OBO->hasNoSignedWrap()) Out << " nsw"; - } else if (const PossiblyExactOperator *Div = - dyn_cast<PossiblyExactOperator>(U)) { + } else if (const auto *Div = dyn_cast<PossiblyExactOperator>(U)) { if (Div->isExact()) Out << " exact"; - } else if (const PossiblyDisjointInst *PDI = - dyn_cast<PossiblyDisjointInst>(U)) { + } else if (const auto *PDI = dyn_cast<PossiblyDisjointInst>(U)) { if (PDI->isDisjoint()) Out << " disjoint"; - } else if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { + } else if (const auto *GEP = dyn_cast<GEPOperator>(U)) { if (GEP->isInBounds()) Out << " inbounds"; else if (GEP->hasNoUnsignedSignedWrap()) @@ -1515,7 +1512,7 @@ static void WriteOptimizationInfo(raw_ostream &Out, const 
User *U) { } } -static void WriteAPFloatInternal(raw_ostream &Out, const APFloat &APF) { +static void writeAPFloatInternal(raw_ostream &Out, const APFloat &APF) { if (&APF.getSemantics() == &APFloat::IEEEsingle() || &APF.getSemantics() == &APFloat::IEEEdouble()) { // We would like to output the FP constant value in exponential notation, @@ -1608,9 +1605,9 @@ static void WriteAPFloatInternal(raw_ostream &Out, const APFloat &APF) { llvm_unreachable("Unsupported floating point type"); } -static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, +static void writeConstantInternal(raw_ostream &Out, const Constant *CV, AsmWriterContext &WriterCtx) { - if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) { + if (const auto *CI = dyn_cast<ConstantInt>(CV)) { Type *Ty = CI->getType(); if (Ty->isVectorTy()) { @@ -1630,7 +1627,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, return; } - if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) { + if (const auto *CFP = dyn_cast<ConstantFP>(CV)) { Type *Ty = CFP->getType(); if (Ty->isVectorTy()) { @@ -1639,7 +1636,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, Out << " "; } - WriteAPFloatInternal(Out, CFP->getValueAPF()); + writeAPFloatInternal(Out, CFP->getValueAPF()); if (Ty->isVectorTy()) Out << ")"; @@ -1652,28 +1649,28 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, return; } - if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV)) { + if (const auto *BA = dyn_cast<BlockAddress>(CV)) { Out << "blockaddress("; - WriteAsOperandInternal(Out, BA->getFunction(), WriterCtx); + writeAsOperandInternal(Out, BA->getFunction(), WriterCtx); Out << ", "; - WriteAsOperandInternal(Out, BA->getBasicBlock(), WriterCtx); + writeAsOperandInternal(Out, BA->getBasicBlock(), WriterCtx); Out << ")"; return; } if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(CV)) { Out << "dso_local_equivalent "; - WriteAsOperandInternal(Out, 
Equiv->getGlobalValue(), WriterCtx); + writeAsOperandInternal(Out, Equiv->getGlobalValue(), WriterCtx); return; } if (const auto *NC = dyn_cast<NoCFIValue>(CV)) { Out << "no_cfi "; - WriteAsOperandInternal(Out, NC->getGlobalValue(), WriterCtx); + writeAsOperandInternal(Out, NC->getGlobalValue(), WriterCtx); return; } - if (const ConstantPtrAuth *CPA = dyn_cast<ConstantPtrAuth>(CV)) { + if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV)) { Out << "ptrauth ("; // ptrauth (ptr CST, i32 KEY[, i64 DISC[, ptr ADDRDISC]?]?) @@ -1686,25 +1683,25 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, ListSeparator LS; for (unsigned i = 0, e = NumOpsToWrite; i != e; ++i) { Out << LS; - WriteAsOperandInternal(Out, CPA->getOperand(i), WriterCtx, + writeAsOperandInternal(Out, CPA->getOperand(i), WriterCtx, /*PrintType=*/true); } Out << ')'; return; } - if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) { + if (const auto *CA = dyn_cast<ConstantArray>(CV)) { Out << '['; ListSeparator LS; for (const Value *Op : CA->operands()) { Out << LS; - WriteAsOperandInternal(Out, Op, WriterCtx, /*PrintType=*/true); + writeAsOperandInternal(Out, Op, WriterCtx, /*PrintType=*/true); } Out << ']'; return; } - if (const ConstantDataArray *CA = dyn_cast<ConstantDataArray>(CV)) { + if (const auto *CA = dyn_cast<ConstantDataArray>(CV)) { // As a special case, print the array as a string if it is an array of // i8 with ConstantInt values. 
if (CA->isString()) { @@ -1718,14 +1715,14 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, ListSeparator LS; for (uint64_t i = 0, e = CA->getNumElements(); i != e; ++i) { Out << LS; - WriteAsOperandInternal(Out, CA->getElementAsConstant(i), WriterCtx, + writeAsOperandInternal(Out, CA->getElementAsConstant(i), WriterCtx, /*PrintType=*/true); } Out << ']'; return; } - if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) { + if (const auto *CS = dyn_cast<ConstantStruct>(CV)) { if (CS->getType()->isPacked()) Out << '<'; Out << '{'; @@ -1734,7 +1731,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, ListSeparator LS; for (const Value *Op : CS->operands()) { Out << LS; - WriteAsOperandInternal(Out, Op, WriterCtx, /*PrintType=*/true); + writeAsOperandInternal(Out, Op, WriterCtx, /*PrintType=*/true); } Out << ' '; } @@ -1755,7 +1752,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, if (auto *SplatVal = CV->getSplatValue()) { if (isa<ConstantInt>(SplatVal) || isa<ConstantFP>(SplatVal)) { Out << "splat ("; - WriteAsOperandInternal(Out, SplatVal, WriterCtx, /*PrintType=*/true); + writeAsOperandInternal(Out, SplatVal, WriterCtx, /*PrintType=*/true); Out << ')'; return; } @@ -1765,7 +1762,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, ListSeparator LS; for (unsigned i = 0, e = CVVTy->getNumElements(); i != e; ++i) { Out << LS; - WriteAsOperandInternal(Out, CV->getAggregateElement(i), WriterCtx, + writeAsOperandInternal(Out, CV->getAggregateElement(i), WriterCtx, /*PrintType=*/true); } Out << '>'; @@ -1792,7 +1789,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, return; } - if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) { + if (const auto *CE = dyn_cast<ConstantExpr>(CV)) { // Use the same shorthand for splat vector (i.e. 
"splat(Ty val)") as is // permitted on IR input to reduce the output changes when enabling // UseConstant{Int,FP}ForScalableSplat. @@ -1802,7 +1799,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, if (auto *SplatVal = CE->getSplatValue()) { if (isa<ConstantInt>(SplatVal) || isa<ConstantFP>(SplatVal)) { Out << "splat ("; - WriteAsOperandInternal(Out, SplatVal, WriterCtx, /*PrintType=*/true); + writeAsOperandInternal(Out, SplatVal, WriterCtx, /*PrintType=*/true); Out << ')'; return; } @@ -1810,10 +1807,10 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, } Out << CE->getOpcodeName(); - WriteOptimizationInfo(Out, CE); + writeOptimizationInfo(Out, CE); Out << " ("; - if (const GEPOperator *GEP = dyn_cast<GEPOperator>(CE)) { + if (const auto *GEP = dyn_cast<GEPOperator>(CE)) { WriterCtx.TypePrinter->print(GEP->getSourceElementType(), Out); Out << ", "; } @@ -1821,7 +1818,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, ListSeparator LS; for (const Value *Op : CE->operands()) { Out << LS; - WriteAsOperandInternal(Out, Op, WriterCtx, /*PrintType=*/true); + writeAsOperandInternal(Out, Op, WriterCtx, /*PrintType=*/true); } if (CE->isCast()) { @@ -1830,7 +1827,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, } if (CE->getOpcode() == Instruction::ShuffleVector) - PrintShuffleMask(Out, CE->getType(), CE->getShuffleMask()); + printShuffleMask(Out, CE->getType(), CE->getShuffleMask()); Out << ')'; return; @@ -1849,9 +1846,9 @@ static void writeMDTuple(raw_ostream &Out, const MDTuple *Node, Out << "null"; } else if (auto *MDV = dyn_cast<ValueAsMetadata>(MD)) { Value *V = MDV->getValue(); - WriteAsOperandInternal(Out, V, WriterCtx, /*PrintType=*/true); + writeAsOperandInternal(Out, V, WriterCtx, /*PrintType=*/true); } else { - WriteAsOperandInternal(Out, MD, WriterCtx); + writeAsOperandInternal(Out, MD, WriterCtx); WriterCtx.onWriteMetadataAsOperand(MD); } } @@ -1939,7 
+1936,7 @@ static void writeMetadataAsOperand(raw_ostream &Out, const Metadata *MD, Out << "null"; return; } - WriteAsOperandInternal(Out, MD, WriterCtx); + writeAsOperandInternal(Out, MD, WriterCtx); WriterCtx.onWriteMetadataAsOperand(MD); } @@ -2619,7 +2616,7 @@ static void writeDIArgList(raw_ostream &Out, const DIArgList *N, MDFieldPrinter Printer(Out, WriterCtx); for (const Metadata *Arg : N->getArgs()) { Out << FS; - WriteAsOperandInternal(Out, Arg, WriterCtx, true); + writeAsOperandInternal(Out, Arg, WriterCtx, true); } Out << ")"; } @@ -2662,7 +2659,7 @@ static void writeDIImportedEntity(raw_ostream &Out, const DIImportedEntity *N, Out << ")"; } -static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node, +static void writeMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node, AsmWriterContext &Ctx) { if (Node->isDistinct()) Out << "distinct "; @@ -2682,7 +2679,7 @@ static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node, // Full implementation of printing a Value as an operand with support for // TypePrinting, etc. 
-static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, +static void writeAsOperandInternal(raw_ostream &Out, const Value *V, AsmWriterContext &WriterCtx, bool PrintType) { if (PrintType) { @@ -2691,18 +2688,18 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, } if (V->hasName()) { - PrintLLVMName(Out, V); + printLLVMName(Out, V); return; } - const Constant *CV = dyn_cast<Constant>(V); + const auto *CV = dyn_cast<Constant>(V); if (CV && !isa<GlobalValue>(CV)) { assert(WriterCtx.TypePrinter && "Constants require TypePrinting!"); - WriteConstantInternal(Out, CV, WriterCtx); + writeConstantInternal(Out, CV, WriterCtx); return; } - if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) { + if (const auto *IA = dyn_cast<InlineAsm>(V)) { Out << "asm "; if (IA->hasSideEffects()) Out << "sideeffect "; @@ -2722,7 +2719,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, } if (auto *MD = dyn_cast<MetadataAsValue>(V)) { - WriteAsOperandInternal(Out, MD->getMetadata(), WriterCtx, + writeAsOperandInternal(Out, MD->getMetadata(), WriterCtx, /* FromValue */ true); return; } @@ -2732,7 +2729,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, auto *Machine = WriterCtx.Machine; // If we have a SlotTracker, use it. if (Machine) { - if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { + if (const auto *GV = dyn_cast<GlobalValue>(V)) { Slot = Machine->getGlobalSlot(GV); Prefix = '@'; } else { @@ -2749,7 +2746,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, } } else if ((Machine = createSlotTracker(V))) { // Otherwise, create one to get the # and then destroy it. 
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { + if (const auto *GV = dyn_cast<GlobalValue>(V)) { Slot = Machine->getGlobalSlot(GV); Prefix = '@'; } else { @@ -2767,21 +2764,21 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, Out << "<badref>"; } -static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD, +static void writeAsOperandInternal(raw_ostream &Out, const Metadata *MD, AsmWriterContext &WriterCtx, bool FromValue) { // Write DIExpressions and DIArgLists inline when used as a value. Improves // readability of debug info intrinsics. - if (const DIExpression *Expr = dyn_cast<DIExpression>(MD)) { + if (const auto *Expr = dyn_cast<DIExpression>(MD)) { writeDIExpression(Out, Expr, WriterCtx); return; } - if (const DIArgList *ArgList = dyn_cast<DIArgList>(MD)) { + if (const auto *ArgList = dyn_cast<DIArgList>(MD)) { writeDIArgList(Out, ArgList, WriterCtx, FromValue); return; } - if (const MDNode *N = dyn_cast<MDNode>(MD)) { + if (const auto *N = dyn_cast<MDNode>(MD)) { std::unique_ptr<SlotTracker> MachineStorage; SaveAndRestore SARMachine(WriterCtx.Machine); if (!WriterCtx.Machine) { @@ -2790,7 +2787,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD, } int Slot = WriterCtx.Machine->getMetadataSlot(N); if (Slot == -1) { - if (const DILocation *Loc = dyn_cast<DILocation>(N)) { + if (const auto *Loc = dyn_cast<DILocation>(N)) { writeDILocation(Out, Loc, WriterCtx); return; } @@ -2802,7 +2799,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD, return; } - if (const MDString *MDS = dyn_cast<MDString>(MD)) { + if (const auto *MDS = dyn_cast<MDString>(MD)) { Out << "!\""; printEscapedString(MDS->getString(), Out); Out << '"'; @@ -2814,7 +2811,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD, assert((FromValue || !isa<LocalAsMetadata>(V)) && "Unexpected function-local metadata outside of value argument"); - WriteAsOperandInternal(Out, 
V->getValue(), WriterCtx, /*PrintType=*/true); + writeAsOperandInternal(Out, V->getValue(), WriterCtx, /*PrintType=*/true); } namespace { @@ -2889,7 +2886,7 @@ public: void printDbgRecord(const DbgRecord &DR); void printDbgRecordLine(const DbgRecord &DR); - void printUseListOrder(const Value *V, const std::vector<unsigned> &Shuffle); + void printUseListOrder(const Value *V, ArrayRef<unsigned> Shuffle); void printUseLists(const Function *F); void printModuleSummaryIndex(); @@ -2901,16 +2898,14 @@ public: void printTypeIdSummary(const TypeIdSummary &TIS); void printTypeIdCompatibleVtableSummary(const TypeIdCompatibleVtableInfo &TI); void printTypeTestResolution(const TypeTestResolution &TTRes); - void printArgs(const std::vector<uint64_t> &Args); + void printArgs(ArrayRef<uint64_t> Args); void printWPDRes(const WholeProgramDevirtResolution &WPDRes); void printTypeIdInfo(const FunctionSummary::TypeIdInfo &TIDInfo); void printVFuncId(const FunctionSummary::VFuncId VFId); - void - printNonConstVCalls(const std::vector<FunctionSummary::VFuncId> &VCallList, - const char *Tag); - void - printConstVCalls(const std::vector<FunctionSummary::ConstVCall> &VCallList, - const char *Tag); + void printNonConstVCalls(ArrayRef<FunctionSummary::VFuncId> VCallList, + const char *Tag); + void printConstVCalls(ArrayRef<FunctionSummary::ConstVCall> VCallList, + const char *Tag); private: /// Print out metadata attachments. 
@@ -2953,7 +2948,7 @@ void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) { return; } auto WriteCtx = getContext(); - WriteAsOperandInternal(Out, Operand, WriteCtx, PrintType); + writeAsOperandInternal(Out, Operand, WriteCtx, PrintType); } void AssemblyWriter::writeSyncScope(const LLVMContext &Context, @@ -3013,7 +3008,7 @@ void AssemblyWriter::writeParamOperand(const Value *Operand, Out << ' '; // Print the operand auto WriterCtx = getContext(); - WriteAsOperandInternal(Out, Operand, WriterCtx); + writeAsOperandInternal(Out, Operand, WriterCtx); } void AssemblyWriter::writeOperandBundles(const CallBase *Call) { @@ -3039,7 +3034,7 @@ void AssemblyWriter::writeOperandBundles(const CallBase *Call) { if (Input == nullptr) Out << "<null operand bundle!>"; else - WriteAsOperandInternal(Out, Input, WriterCtx, /*PrintType=*/true); + writeAsOperandInternal(Out, Input, WriterCtx, /*PrintType=*/true); } Out << ')'; @@ -3311,14 +3306,8 @@ void AssemblyWriter::printTypeIdCompatibleVtableSummary( Out << ")"; } -void AssemblyWriter::printArgs(const std::vector<uint64_t> &Args) { - Out << "args: ("; - ListSeparator FS; - for (auto arg : Args) { - Out << FS; - Out << arg; - } - Out << ")"; +void AssemblyWriter::printArgs(ArrayRef<uint64_t> Args) { + Out << "args: (" << llvm::interleaved(Args) << ')'; } void AssemblyWriter::printWPDRes(const WholeProgramDevirtResolution &WPDRes) { @@ -3658,7 +3647,7 @@ void AssemblyWriter::printVFuncId(const FunctionSummary::VFuncId VFId) { } void AssemblyWriter::printNonConstVCalls( - const std::vector<FunctionSummary::VFuncId> &VCallList, const char *Tag) { + ArrayRef<FunctionSummary::VFuncId> VCallList, const char *Tag) { Out << Tag << ": ("; ListSeparator FS; for (auto &VFuncId : VCallList) { @@ -3669,8 +3658,7 @@ void AssemblyWriter::printNonConstVCalls( } void AssemblyWriter::printConstVCalls( - const std::vector<FunctionSummary::ConstVCall> &VCallList, - const char *Tag) { + ArrayRef<FunctionSummary::ConstVCall> 
VCallList, const char *Tag) { Out << Tag << ": ("; ListSeparator FS; for (auto &ConstVCall : VCallList) { @@ -3793,7 +3781,7 @@ void AssemblyWriter::printNamedMDNode(const NamedMDNode *NMD) { Out << "}\n"; } -static void PrintVisibility(GlobalValue::VisibilityTypes Vis, +static void printVisibility(GlobalValue::VisibilityTypes Vis, formatted_raw_ostream &Out) { switch (Vis) { case GlobalValue::DefaultVisibility: break; @@ -3802,13 +3790,13 @@ static void PrintVisibility(GlobalValue::VisibilityTypes Vis, } } -static void PrintDSOLocation(const GlobalValue &GV, +static void printDSOLocation(const GlobalValue &GV, formatted_raw_ostream &Out) { if (GV.isDSOLocal() && !GV.isImplicitDSOLocal()) Out << "dso_local "; } -static void PrintDLLStorageClass(GlobalValue::DLLStorageClassTypes SCT, +static void printDLLStorageClass(GlobalValue::DLLStorageClassTypes SCT, formatted_raw_ostream &Out) { switch (SCT) { case GlobalValue::DefaultStorageClass: break; @@ -3817,7 +3805,7 @@ static void PrintDLLStorageClass(GlobalValue::DLLStorageClassTypes SCT, } } -static void PrintThreadLocalModel(GlobalVariable::ThreadLocalMode TLM, +static void printThreadLocalModel(GlobalVariable::ThreadLocalMode TLM, formatted_raw_ostream &Out) { switch (TLM) { case GlobalVariable::NotThreadLocal: @@ -3863,7 +3851,7 @@ static void maybePrintComdat(formatted_raw_ostream &Out, return; Out << '('; - PrintLLVMName(Out, C->getName(), ComdatPrefix); + printLLVMName(Out, C->getName(), ComdatPrefix); Out << ')'; } @@ -3872,17 +3860,17 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) { Out << "; Materializable\n"; AsmWriterContext WriterCtx(&TypePrinter, &Machine, GV->getParent()); - WriteAsOperandInternal(Out, GV, WriterCtx); + writeAsOperandInternal(Out, GV, WriterCtx); Out << " = "; if (!GV->hasInitializer() && GV->hasExternalLinkage()) Out << "external "; Out << getLinkageNameWithSpace(GV->getLinkage()); - PrintDSOLocation(*GV, Out); - PrintVisibility(GV->getVisibility(), Out); - 
PrintDLLStorageClass(GV->getDLLStorageClass(), Out); - PrintThreadLocalModel(GV->getThreadLocalMode(), Out); + printDSOLocation(*GV, Out); + printVisibility(GV->getVisibility(), Out); + printDLLStorageClass(GV->getDLLStorageClass(), Out); + printThreadLocalModel(GV->getThreadLocalMode(), Out); StringRef UA = getUnnamedAddrEncoding(GV->getUnnamedAddr()); if (!UA.empty()) Out << UA << ' '; @@ -3963,14 +3951,14 @@ void AssemblyWriter::printAlias(const GlobalAlias *GA) { Out << "; Materializable\n"; AsmWriterContext WriterCtx(&TypePrinter, &Machine, GA->getParent()); - WriteAsOperandInternal(Out, GA, WriterCtx); + writeAsOperandInternal(Out, GA, WriterCtx); Out << " = "; Out << getLinkageNameWithSpace(GA->getLinkage()); - PrintDSOLocation(*GA, Out); - PrintVisibility(GA->getVisibility(), Out); - PrintDLLStorageClass(GA->getDLLStorageClass(), Out); - PrintThreadLocalModel(GA->getThreadLocalMode(), Out); + printDSOLocation(*GA, Out); + printVisibility(GA->getVisibility(), Out); + printDLLStorageClass(GA->getDLLStorageClass(), Out); + printThreadLocalModel(GA->getThreadLocalMode(), Out); StringRef UA = getUnnamedAddrEncoding(GA->getUnnamedAddr()); if (!UA.empty()) Out << UA << ' '; @@ -4002,12 +3990,12 @@ void AssemblyWriter::printIFunc(const GlobalIFunc *GI) { Out << "; Materializable\n"; AsmWriterContext WriterCtx(&TypePrinter, &Machine, GI->getParent()); - WriteAsOperandInternal(Out, GI, WriterCtx); + writeAsOperandInternal(Out, GI, WriterCtx); Out << " = "; Out << getLinkageNameWithSpace(GI->getLinkage()); - PrintDSOLocation(*GI, Out); - PrintVisibility(GI->getVisibility(), Out); + printDSOLocation(*GI, Out); + printVisibility(GI->getVisibility(), Out); Out << "ifunc "; @@ -4059,7 +4047,7 @@ void AssemblyWriter::printTypeIdentities() { auto &NamedTypes = TypePrinter.getNamedTypes(); for (StructType *NamedType : NamedTypes) { - PrintLLVMName(Out, NamedType->getName(), LocalPrefix); + printLLVMName(Out, NamedType->getName(), LocalPrefix); Out << " = type "; // Make sure 
we print out at least one level of the type structure, so @@ -4107,13 +4095,13 @@ void AssemblyWriter::printFunction(const Function *F) { Out << "define "; Out << getLinkageNameWithSpace(F->getLinkage()); - PrintDSOLocation(*F, Out); - PrintVisibility(F->getVisibility(), Out); - PrintDLLStorageClass(F->getDLLStorageClass(), Out); + printDSOLocation(*F, Out); + printVisibility(F->getVisibility(), Out); + printDLLStorageClass(F->getDLLStorageClass(), Out); // Print the calling convention. if (F->getCallingConv() != CallingConv::C) { - PrintCallingConv(F->getCallingConv(), Out); + printCallingConv(F->getCallingConv(), Out); Out << " "; } @@ -4123,7 +4111,7 @@ void AssemblyWriter::printFunction(const Function *F) { TypePrinter.print(F->getReturnType(), Out); AsmWriterContext WriterCtx(&TypePrinter, &Machine, F->getParent()); Out << ' '; - WriteAsOperandInternal(Out, F, WriterCtx); + writeAsOperandInternal(Out, F, WriterCtx); Out << '('; // Loop over the arguments, printing them... @@ -4239,7 +4227,7 @@ void AssemblyWriter::printArgument(const Argument *Arg, AttributeSet Attrs) { // Output name, if available... if (Arg->hasName()) { Out << ' '; - PrintLLVMName(Out, Arg); + printLLVMName(Out, Arg); } else { int Slot = Machine.getLocalSlot(Arg); assert(Slot != -1 && "expect argument in function here"); @@ -4252,7 +4240,7 @@ void AssemblyWriter::printBasicBlock(const BasicBlock *BB) { bool IsEntryBlock = BB->getParent() && BB->isEntryBlock(); if (BB->hasName()) { // Print out the label if it exists... Out << "\n"; - PrintLLVMName(Out, BB->getName(), LabelPrefix); + printLLVMName(Out, BB->getName(), LabelPrefix); Out << ':'; } else if (!IsEntryBlock) { Out << "\n"; @@ -4370,7 +4358,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { // Print out name if it exists... if (I.hasName()) { - PrintLLVMName(Out, &I); + printLLVMName(Out, &I); Out << " = "; } else if (!I.getType()->isVoidTy()) { // Print out the def slot taken. 
@@ -4381,7 +4369,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << '%' << SlotNum << " = "; } - if (const CallInst *CI = dyn_cast<CallInst>(&I)) { + if (const auto *CI = dyn_cast<CallInst>(&I)) { if (CI->isMustTailCall()) Out << "musttail "; else if (CI->isTailCall()) @@ -4409,14 +4397,14 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << " volatile"; // Print out optimization information. - WriteOptimizationInfo(Out, &I); + writeOptimizationInfo(Out, &I); // Print out the compare instruction predicates - if (const CmpInst *CI = dyn_cast<CmpInst>(&I)) + if (const auto *CI = dyn_cast<CmpInst>(&I)) Out << ' ' << CI->getPredicate(); // Print out the atomicrmw operation - if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) + if (const auto *RMWI = dyn_cast<AtomicRMWInst>(&I)) Out << ' ' << AtomicRMWInst::getOperationName(RMWI->getOperation()); // Print out the type of the operands... @@ -4459,29 +4447,32 @@ void AssemblyWriter::printInstruction(const Instruction &I) { writeOperand(I.getOperand(i), true); } Out << ']'; - } else if (const PHINode *PN = dyn_cast<PHINode>(&I)) { + } else if (const auto *PN = dyn_cast<PHINode>(&I)) { Out << ' '; TypePrinter.print(I.getType(), Out); Out << ' '; ListSeparator LS; - for (unsigned op = 0, Eop = PN->getNumIncomingValues(); op < Eop; ++op) { + for (const auto &[V, Block] : + zip_equal(PN->incoming_values(), PN->blocks())) { Out << LS << "[ "; - writeOperand(PN->getIncomingValue(op), false); Out << ", "; - writeOperand(PN->getIncomingBlock(op), false); Out << " ]"; + writeOperand(V, false); + Out << ", "; + writeOperand(Block, false); + Out << " ]"; } - } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&I)) { + } else if (const auto *EVI = dyn_cast<ExtractValueInst>(&I)) { Out << ' '; writeOperand(I.getOperand(0), true); - for (unsigned i : EVI->indices()) - Out << ", " << i; - } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&I)) { + Out << 
", "; + Out << llvm::interleaved(EVI->indices()); + } else if (const auto *IVI = dyn_cast<InsertValueInst>(&I)) { Out << ' '; writeOperand(I.getOperand(0), true); Out << ", "; writeOperand(I.getOperand(1), true); - for (unsigned i : IVI->indices()) - Out << ", " << i; - } else if (const LandingPadInst *LPI = dyn_cast<LandingPadInst>(&I)) { + Out << ", "; + Out << llvm::interleaved(IVI->indices()); + } else if (const auto *LPI = dyn_cast<LandingPadInst>(&I)) { Out << ' '; TypePrinter.print(I.getType(), Out); if (LPI->isCleanup() || LPI->getNumClauses() != 0) @@ -4540,11 +4531,11 @@ void AssemblyWriter::printInstruction(const Instruction &I) { writeOperand(CRI->getOperand(1), /*PrintType=*/true); else Out << "to caller"; - } else if (const CallInst *CI = dyn_cast<CallInst>(&I)) { + } else if (const auto *CI = dyn_cast<CallInst>(&I)) { // Print the calling convention being used. if (CI->getCallingConv() != CallingConv::C) { Out << " "; - PrintCallingConv(CI->getCallingConv(), Out); + printCallingConv(CI->getCallingConv(), Out); } Operand = CI->getCalledOperand(); @@ -4587,7 +4578,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttrs()); writeOperandBundles(CI); - } else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) { + } else if (const auto *II = dyn_cast<InvokeInst>(&I)) { Operand = II->getCalledOperand(); FunctionType *FTy = II->getFunctionType(); Type *RetTy = FTy->getReturnType(); @@ -4596,7 +4587,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { // Print the calling convention being used. 
if (II->getCallingConv() != CallingConv::C) { Out << " "; - PrintCallingConv(II->getCallingConv(), Out); + printCallingConv(II->getCallingConv(), Out); } if (PAL.hasRetAttrs()) @@ -4630,7 +4621,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { writeOperand(II->getNormalDest(), true); Out << " unwind "; writeOperand(II->getUnwindDest(), true); - } else if (const CallBrInst *CBI = dyn_cast<CallBrInst>(&I)) { + } else if (const auto *CBI = dyn_cast<CallBrInst>(&I)) { Operand = CBI->getCalledOperand(); FunctionType *FTy = CBI->getFunctionType(); Type *RetTy = FTy->getReturnType(); @@ -4639,7 +4630,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { // Print the calling convention being used. if (CBI->getCallingConv() != CallingConv::C) { Out << " "; - PrintCallingConv(CBI->getCallingConv(), Out); + printCallingConv(CBI->getCallingConv(), Out); } if (PAL.hasRetAttrs()) @@ -4675,7 +4666,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { writeOperand(Dest, true); } Out << ']'; - } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) { + } else if (const auto *AI = dyn_cast<AllocaInst>(&I)) { Out << ' '; if (AI->isUsedWithInAlloca()) Out << "inalloca "; @@ -4697,9 +4688,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) { } unsigned AddrSpace = AI->getAddressSpace(); - if (AddrSpace != 0) { + if (AddrSpace != 0) Out << ", addrspace(" << AddrSpace << ')'; - } } else if (isa<CastInst>(I)) { if (Operand) { Out << ' '; @@ -4714,7 +4704,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { } Out << ", "; TypePrinter.print(I.getType(), Out); - } else if (Operand) { // Print the normal way. + } else if (Operand) { // Print the normal way. 
if (const auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { Out << ' '; TypePrinter.print(GEP->getSourceElementType(), Out); @@ -4763,28 +4753,28 @@ void AssemblyWriter::printInstruction(const Instruction &I) { } // Print atomic ordering/alignment for memory operations - if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) { + if (const auto *LI = dyn_cast<LoadInst>(&I)) { if (LI->isAtomic()) writeAtomic(LI->getContext(), LI->getOrdering(), LI->getSyncScopeID()); if (MaybeAlign A = LI->getAlign()) Out << ", align " << A->value(); - } else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) { + } else if (const auto *SI = dyn_cast<StoreInst>(&I)) { if (SI->isAtomic()) writeAtomic(SI->getContext(), SI->getOrdering(), SI->getSyncScopeID()); if (MaybeAlign A = SI->getAlign()) Out << ", align " << A->value(); - } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) { + } else if (const auto *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) { writeAtomicCmpXchg(CXI->getContext(), CXI->getSuccessOrdering(), CXI->getFailureOrdering(), CXI->getSyncScopeID()); Out << ", align " << CXI->getAlign().value(); - } else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) { + } else if (const auto *RMWI = dyn_cast<AtomicRMWInst>(&I)) { writeAtomic(RMWI->getContext(), RMWI->getOrdering(), RMWI->getSyncScopeID()); Out << ", align " << RMWI->getAlign().value(); - } else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) { + } else if (const auto *FI = dyn_cast<FenceInst>(&I)) { writeAtomic(FI->getContext(), FI->getOrdering(), FI->getSyncScopeID()); - } else if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(&I)) { - PrintShuffleMask(Out, SVI->getType(), SVI->getShuffleMask()); + } else if (const auto *SVI = dyn_cast<ShuffleVectorInst>(&I)) { + printShuffleMask(Out, SVI->getType(), SVI->getShuffleMask()); } // Print Metadata info. 
@@ -4840,7 +4830,7 @@ void AssemblyWriter::printDbgVariableRecord(const DbgVariableRecord &DVR) { if (!M) Out << "(null)"; else - WriteAsOperandInternal(Out, M, WriterCtx, true); + writeAsOperandInternal(Out, M, WriterCtx, true); }; Out << "("; @@ -4874,9 +4864,9 @@ void AssemblyWriter::printDbgRecordLine(const DbgRecord &DR) { void AssemblyWriter::printDbgLabelRecord(const DbgLabelRecord &Label) { auto WriterCtx = getContext(); Out << "#dbg_label("; - WriteAsOperandInternal(Out, Label.getRawLabel(), WriterCtx, true); + writeAsOperandInternal(Out, Label.getRawLabel(), WriterCtx, true); Out << ", "; - WriteAsOperandInternal(Out, Label.getDebugLoc(), WriterCtx, true); + writeAsOperandInternal(Out, Label.getDebugLoc(), WriterCtx, true); Out << ")"; } @@ -4899,7 +4889,7 @@ void AssemblyWriter::printMetadataAttachments( } else Out << "!<unknown kind #" << Kind << ">"; Out << ' '; - WriteAsOperandInternal(Out, I.second, WriterCtx); + writeAsOperandInternal(Out, I.second, WriterCtx); } } @@ -4922,7 +4912,7 @@ void AssemblyWriter::writeAllMDNodes() { void AssemblyWriter::printMDNodeBody(const MDNode *Node) { auto WriterCtx = getContext(); - WriteMDNodeBodyInternal(Out, Node, WriterCtx); + writeMDNodeBodyInternal(Out, Node, WriterCtx); } void AssemblyWriter::writeAttribute(const Attribute &Attr, bool InAttrGroup) { @@ -4941,12 +4931,10 @@ void AssemblyWriter::writeAttribute(const Attribute &Attr, bool InAttrGroup) { void AssemblyWriter::writeAttributeSet(const AttributeSet &AttrSet, bool InAttrGroup) { - bool FirstAttr = true; + ListSeparator LS(" "); for (const auto &Attr : AttrSet) { - if (!FirstAttr) - Out << ' '; + Out << LS; writeAttribute(Attr, InAttrGroup); - FirstAttr = false; } } @@ -4963,7 +4951,7 @@ void AssemblyWriter::writeAllAttributeGroups() { } void AssemblyWriter::printUseListOrder(const Value *V, - const std::vector<unsigned> &Shuffle) { + ArrayRef<unsigned> Shuffle) { bool IsInFunction = Machine.getFunction(); if (IsInFunction) Out << " "; @@ -5052,7 
+5040,7 @@ void NamedMDNode::print(raw_ostream &ROS, ModuleSlotTracker &MST, } void Comdat::print(raw_ostream &ROS, bool /*IsForDebug*/) const { - PrintLLVMName(ROS, getName(), ComdatPrefix); + printLLVMName(ROS, getName(), ComdatPrefix); ROS << " = comdat "; switch (getSelectionKind()) { @@ -5084,7 +5072,7 @@ void Type::print(raw_ostream &OS, bool /*IsForDebug*/, bool NoDetails) const { return; // If the type is a named struct type, print the body as well. - if (StructType *STy = dyn_cast<StructType>(const_cast<Type*>(this))) + if (auto *STy = dyn_cast<StructType>(const_cast<Type *>(this))) if (!STy->isLiteral()) { OS << " = type "; TP.printStructBody(STy, OS); @@ -5120,11 +5108,9 @@ void DbgMarker::print(raw_ostream &ROS, ModuleSlotTracker &MST, SlotTracker EmptySlotTable(static_cast<const Module *>(nullptr)); SlotTracker &SlotTable = MST.getMachine() ? *MST.getMachine() : EmptySlotTable; - auto incorporateFunction = [&](const Function *F) { - if (F) - MST.incorporateFunction(*F); - }; - incorporateFunction(getParent() ? getParent()->getParent() : nullptr); + const Function *F = getParent() ? getParent()->getParent() : nullptr; + if (F) + MST.incorporateFunction(*F); AssemblyWriter W(OS, SlotTable, getModuleFromDPI(this), nullptr, IsForDebug); W.printDbgMarker(*this); } @@ -5141,13 +5127,11 @@ void DbgVariableRecord::print(raw_ostream &ROS, ModuleSlotTracker &MST, SlotTracker EmptySlotTable(static_cast<const Module *>(nullptr)); SlotTracker &SlotTable = MST.getMachine() ? *MST.getMachine() : EmptySlotTable; - auto incorporateFunction = [&](const Function *F) { - if (F) - MST.incorporateFunction(*F); - }; - incorporateFunction(Marker && Marker->getParent() + const Function *F = Marker && Marker->getParent() ? 
Marker->getParent()->getParent() - : nullptr); + : nullptr; + if (F) + MST.incorporateFunction(*F); AssemblyWriter W(OS, SlotTable, getModuleFromDPI(this), nullptr, IsForDebug); W.printDbgVariableRecord(*this); } @@ -5158,12 +5142,11 @@ void DbgLabelRecord::print(raw_ostream &ROS, ModuleSlotTracker &MST, SlotTracker EmptySlotTable(static_cast<const Module *>(nullptr)); SlotTracker &SlotTable = MST.getMachine() ? *MST.getMachine() : EmptySlotTable; - auto incorporateFunction = [&](const Function *F) { - if (F) - MST.incorporateFunction(*F); - }; - incorporateFunction(Marker->getParent() ? Marker->getParent()->getParent() - : nullptr); + const Function *F = + Marker->getParent() ? Marker->getParent()->getParent() : nullptr; + if (F) + MST.incorporateFunction(*F); + AssemblyWriter W(OS, SlotTable, getModuleFromDPI(this), nullptr, IsForDebug); W.printDbgLabelRecord(*this); } @@ -5185,39 +5168,39 @@ void Value::print(raw_ostream &ROS, ModuleSlotTracker &MST, SlotTracker EmptySlotTable(static_cast<const Module *>(nullptr)); SlotTracker &SlotTable = MST.getMachine() ? *MST.getMachine() : EmptySlotTable; - auto incorporateFunction = [&](const Function *F) { + auto IncorporateFunction = [&](const Function *F) { if (F) MST.incorporateFunction(*F); }; - if (const Instruction *I = dyn_cast<Instruction>(this)) { - incorporateFunction(I->getParent() ? I->getParent()->getParent() : nullptr); + if (const auto *I = dyn_cast<Instruction>(this)) { + IncorporateFunction(I->getParent() ? 
I->getParent()->getParent() : nullptr); AssemblyWriter W(OS, SlotTable, getModuleFromVal(I), nullptr, IsForDebug); W.printInstruction(*I); - } else if (const BasicBlock *BB = dyn_cast<BasicBlock>(this)) { - incorporateFunction(BB->getParent()); + } else if (const auto *BB = dyn_cast<BasicBlock>(this)) { + IncorporateFunction(BB->getParent()); AssemblyWriter W(OS, SlotTable, getModuleFromVal(BB), nullptr, IsForDebug); W.printBasicBlock(BB); - } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(this)) { + } else if (const auto *GV = dyn_cast<GlobalValue>(this)) { AssemblyWriter W(OS, SlotTable, GV->getParent(), nullptr, IsForDebug); - if (const GlobalVariable *V = dyn_cast<GlobalVariable>(GV)) + if (const auto *V = dyn_cast<GlobalVariable>(GV)) W.printGlobal(V); - else if (const Function *F = dyn_cast<Function>(GV)) + else if (const auto *F = dyn_cast<Function>(GV)) W.printFunction(F); - else if (const GlobalAlias *A = dyn_cast<GlobalAlias>(GV)) + else if (const auto *A = dyn_cast<GlobalAlias>(GV)) W.printAlias(A); - else if (const GlobalIFunc *I = dyn_cast<GlobalIFunc>(GV)) + else if (const auto *I = dyn_cast<GlobalIFunc>(GV)) W.printIFunc(I); else llvm_unreachable("Unknown GlobalValue to print out!"); - } else if (const MetadataAsValue *V = dyn_cast<MetadataAsValue>(this)) { + } else if (const auto *V = dyn_cast<MetadataAsValue>(this)) { V->getMetadata()->print(ROS, MST, getModuleFromVal(V)); - } else if (const Constant *C = dyn_cast<Constant>(this)) { + } else if (const auto *C = dyn_cast<Constant>(this)) { TypePrinting TypePrinter; TypePrinter.print(C->getType(), OS); OS << ' '; AsmWriterContext WriterCtx(&TypePrinter, MST.getMachine()); - WriteConstantInternal(OS, C, WriterCtx); + writeConstantInternal(OS, C, WriterCtx); } else if (isa<InlineAsm>(this) || isa<Argument>(this)) { this->printAsOperand(OS, /* PrintType */ true, MST); } else { @@ -5233,7 +5216,7 @@ static bool printWithoutType(const Value &V, raw_ostream &O, if (V.hasName() || 
isa<GlobalValue>(V) || (!isa<Constant>(V) && !isa<MetadataAsValue>(V))) { AsmWriterContext WriterCtx(nullptr, Machine, M); - WriteAsOperandInternal(O, &V, WriterCtx); + writeAsOperandInternal(O, &V, WriterCtx); return true; } return false; @@ -5243,7 +5226,7 @@ static void printAsOperandImpl(const Value &V, raw_ostream &O, bool PrintType, ModuleSlotTracker &MST) { TypePrinting TypePrinter(MST.getModule()); AsmWriterContext WriterCtx(&TypePrinter, MST.getMachine(), MST.getModule()); - WriteAsOperandInternal(O, &V, WriterCtx, PrintType); + writeAsOperandInternal(O, &V, WriterCtx, PrintType); } void Value::printAsOperand(raw_ostream &O, bool PrintType, @@ -5274,14 +5257,14 @@ void Value::printAsOperand(raw_ostream &O, bool PrintType, static void printMetadataImplRec(raw_ostream &ROS, const Metadata &MD, AsmWriterContext &WriterCtx) { formatted_raw_ostream OS(ROS); - WriteAsOperandInternal(OS, &MD, WriterCtx, /* FromValue */ true); + writeAsOperandInternal(OS, &MD, WriterCtx, /* FromValue */ true); auto *N = dyn_cast<MDNode>(&MD); if (!N || isa<DIExpression>(MD)) return; OS << " = "; - WriteMDNodeBodyInternal(OS, N, WriterCtx); + writeMDNodeBodyInternal(OS, N, WriterCtx); } namespace { @@ -5342,14 +5325,14 @@ static void printMetadataImpl(raw_ostream &ROS, const Metadata &MD, WriterCtx = std::make_unique<AsmWriterContext>(&TypePrinter, MST.getMachine(), M); - WriteAsOperandInternal(OS, &MD, *WriterCtx, /* FromValue */ true); + writeAsOperandInternal(OS, &MD, *WriterCtx, /* FromValue */ true); auto *N = dyn_cast<MDNode>(&MD); if (OnlyAsOperand || !N || isa<DIExpression>(MD)) return; OS << " = "; - WriteMDNodeBodyInternal(OS, N, *WriterCtx); + writeMDNodeBodyInternal(OS, N, *WriterCtx); } void Metadata::printAsOperand(raw_ostream &OS, const Module *M) const { diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp index a8bb34f..33ca46c 100644 --- a/llvm/lib/IR/Instruction.cpp +++ b/llvm/lib/IR/Instruction.cpp @@ -30,6 +30,8 @@ #include 
"llvm/Support/Compiler.h" using namespace llvm; +namespace llvm { + // FIXME: Flag used for an ablation performance test, Issue #147390. Placing it // here because referencing IR should be feasible from anywhere. Will be // removed after the ablation test. @@ -38,6 +40,8 @@ cl::opt<bool> ProfcheckDisableMetadataFixes( cl::desc( "Disable metadata propagation fixes discovered through Issue #147390")); +} // end namespace llvm + InsertPosition::InsertPosition(Instruction *InsertBefore) : InsertAt(InsertBefore ? InsertBefore->getIterator() : InstListType::iterator()) {} diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp index dd83168..941e41f 100644 --- a/llvm/lib/IR/Instructions.cpp +++ b/llvm/lib/IR/Instructions.cpp @@ -4141,23 +4141,6 @@ void SwitchInst::growOperands() { growHungoffUses(ReservedSpace); } -MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() { - assert(Changed && "called only if metadata has changed"); - - if (!Weights) - return nullptr; - - assert(SI.getNumSuccessors() == Weights->size() && - "num of prof branch_weights must accord with num of successors"); - - bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; }); - - if (AllZeroes || Weights->size() < 2) - return nullptr; - - return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights); -} - void SwitchInstProfUpdateWrapper::init() { MDNode *ProfileData = getBranchWeightMDNode(SI); if (!ProfileData) diff --git a/llvm/lib/IR/ProfDataUtils.cpp b/llvm/lib/IR/ProfDataUtils.cpp index 99029c1..edeca97 100644 --- a/llvm/lib/IR/ProfDataUtils.cpp +++ b/llvm/lib/IR/ProfDataUtils.cpp @@ -12,6 +12,7 @@ #include "llvm/IR/ProfDataUtils.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Function.h" @@ -19,6 +20,7 @@ #include "llvm/IR/LLVMContext.h" #include "llvm/IR/MDBuilder.h" #include "llvm/IR/Metadata.h" +#include "llvm/Support/CommandLine.h" using namespace llvm; @@ -84,10 
+86,31 @@ static void extractFromBranchWeightMD(const MDNode *ProfileData, } } +/// Push the weights right to fit in uint32_t. +static SmallVector<uint32_t> fitWeights(ArrayRef<uint64_t> Weights) { + SmallVector<uint32_t> Ret; + Ret.reserve(Weights.size()); + uint64_t Max = *llvm::max_element(Weights); + if (Max > UINT_MAX) { + unsigned Offset = 32 - llvm::countl_zero(Max); + for (const uint64_t &Value : Weights) + Ret.push_back(static_cast<uint32_t>(Value >> Offset)); + } else { + append_range(Ret, Weights); + } + return Ret; +} + } // namespace namespace llvm { - +cl::opt<bool> ElideAllZeroBranchWeights("elide-all-zero-branch-weights", +#if defined(LLVM_ENABLE_PROFCHECK) + cl::init(false) +#else + cl::init(true) +#endif +); const char *MDProfLabels::BranchWeights = "branch_weights"; const char *MDProfLabels::ExpectedBranchWeights = "expected"; const char *MDProfLabels::ValueProfile = "VP"; @@ -282,12 +305,23 @@ bool hasExplicitlyUnknownBranchWeights(const Instruction &I) { } void setBranchWeights(Instruction &I, ArrayRef<uint32_t> Weights, - bool IsExpected) { + bool IsExpected, bool ElideAllZero) { + if ((ElideAllZeroBranchWeights && ElideAllZero) && + llvm::all_of(Weights, [](uint32_t V) { return V == 0; })) { + I.setMetadata(LLVMContext::MD_prof, nullptr); + return; + } + MDBuilder MDB(I.getContext()); MDNode *BranchWeights = MDB.createBranchWeights(Weights, IsExpected); I.setMetadata(LLVMContext::MD_prof, BranchWeights); } +void setFittedBranchWeights(Instruction &I, ArrayRef<uint64_t> Weights, + bool IsExpected, bool ElideAllZero) { + setBranchWeights(I, fitWeights(Weights), IsExpected, ElideAllZero); +} + SmallVector<uint32_t> downscaleWeights(ArrayRef<uint64_t> Weights, std::optional<uint64_t> KnownMaxCount) { uint64_t MaxCount = KnownMaxCount.has_value() ? 
KnownMaxCount.value() diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp index e5e062d..a347609 100644 --- a/llvm/lib/IR/Value.cpp +++ b/llvm/lib/IR/Value.cpp @@ -36,7 +36,7 @@ using namespace llvm; -cl::opt<bool> UseDerefAtPointSemantics( +static cl::opt<bool> UseDerefAtPointSemantics( "use-dereferenceable-at-point-semantics", cl::Hidden, cl::init(false), cl::desc("Deref attributes and metadata infer facts at definition only")); diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp index 7b25262..e6544f3 100644 --- a/llvm/lib/LTO/LTO.cpp +++ b/llvm/lib/LTO/LTO.cpp @@ -75,9 +75,10 @@ static cl::opt<bool> DumpThinCGSCCs("dump-thin-cg-sccs", cl::init(false), cl::Hidden, cl::desc("Dump the SCCs in the ThinLTO index's callgraph")); +namespace llvm { extern cl::opt<bool> CodeGenDataThinLTOTwoRounds; - extern cl::opt<bool> ForceImportAll; +} // end namespace llvm namespace llvm { /// Enable global value internalization in LTO. diff --git a/llvm/lib/Object/OffloadBundle.cpp b/llvm/lib/Object/OffloadBundle.cpp index 0dd378e..329dcbf 100644 --- a/llvm/lib/Object/OffloadBundle.cpp +++ b/llvm/lib/Object/OffloadBundle.cpp @@ -120,14 +120,15 @@ OffloadBundleFatBin::create(MemoryBufferRef Buf, uint64_t SectionOffset, if (identify_magic(Buf.getBuffer()) != file_magic::offload_bundle) return errorCodeToError(object_error::parse_failed); - OffloadBundleFatBin *TheBundle = new OffloadBundleFatBin(Buf, FileName); + std::unique_ptr<OffloadBundleFatBin> TheBundle( + new OffloadBundleFatBin(Buf, FileName)); // Read the Bundle Entries Error Err = TheBundle->readEntries(Buf.getBuffer(), SectionOffset); if (Err) return Err; - return std::unique_ptr<OffloadBundleFatBin>(TheBundle); + return std::move(TheBundle); } Error OffloadBundleFatBin::extractBundle(const ObjectFile &Source) { diff --git a/llvm/lib/ObjectYAML/DXContainerYAML.cpp b/llvm/lib/ObjectYAML/DXContainerYAML.cpp index 3c09ae4..5dff9ba 100644 --- a/llvm/lib/ObjectYAML/DXContainerYAML.cpp +++ 
b/llvm/lib/ObjectYAML/DXContainerYAML.cpp @@ -154,7 +154,7 @@ DXContainerYAML::RootSignatureYamlDesc::create( if (Error E = readDescriptorRanges<dxbc::RTS0::v1::DescriptorRange>( Header, RootSigDesc, DTV)) return std::move(E); - } else if (Version == 2) { + } else if (Version == 2 || Version == 3) { if (Error E = readDescriptorRanges<dxbc::RTS0::v2::DescriptorRange>( Header, RootSigDesc, DTV)) return std::move(E); diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp index 256cf9d..7069e8d 100644 --- a/llvm/lib/Passes/PassBuilderPipelines.cpp +++ b/llvm/lib/Passes/PassBuilderPipelines.cpp @@ -150,6 +150,8 @@ using namespace llvm; +namespace llvm { + static cl::opt<InliningAdvisorMode> UseInlineAdvisor( "enable-ml-inliner", cl::init(InliningAdvisorMode::Default), cl::Hidden, cl::desc("Enable ML policy for inliner. Currently trained for -Oz only"), @@ -305,7 +307,6 @@ static cl::opt<std::string> InstrumentColdFuncOnlyPath( extern cl::opt<std::string> UseCtxProfile; extern cl::opt<bool> PGOInstrumentColdFunctionOnly; -namespace llvm { extern cl::opt<bool> EnableMemProfContextDisambiguation; } // namespace llvm @@ -610,7 +611,9 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level, // Jump table to switch conversion. 
if (EnableJumpTableToSwitch) - FPM.addPass(JumpTableToSwitchPass()); + FPM.addPass(JumpTableToSwitchPass( + /*InLTO=*/Phase == ThinOrFullLTOPhase::ThinLTOPostLink || + Phase == ThinOrFullLTOPhase::FullLTOPostLink)); FPM.addPass( SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true))); diff --git a/llvm/lib/ProfileData/MemProfCommon.cpp b/llvm/lib/ProfileData/MemProfCommon.cpp index a13a291..cfd2efd 100644 --- a/llvm/lib/ProfileData/MemProfCommon.cpp +++ b/llvm/lib/ProfileData/MemProfCommon.cpp @@ -20,6 +20,8 @@ using namespace llvm; using namespace llvm::memprof; +namespace llvm { + // Upper bound on lifetime access density (accesses per byte per lifetime sec) // for marking an allocation cold. LLVM_ABI cl::opt<float> MemProfLifetimeAccessDensityColdThreshold( @@ -48,6 +50,8 @@ LLVM_ABI cl::opt<bool> cl::desc("Enable use of hot hints (only supported for " "unambigously hot allocations)")); +} // end namespace llvm + AllocationType llvm::memprof::getAllocType(uint64_t TotalLifetimeAccessDensity, uint64_t AllocCount, uint64_t TotalLifetime) { diff --git a/llvm/lib/Support/Path.cpp b/llvm/lib/Support/Path.cpp index 761d29e..3e06666 100644 --- a/llvm/lib/Support/Path.cpp +++ b/llvm/lib/Support/Path.cpp @@ -700,6 +700,55 @@ bool is_relative(const Twine &path, Style style) { return !is_absolute(path, style); } +void make_absolute(const Twine ¤t_directory, + SmallVectorImpl<char> &path) { + StringRef p(path.data(), path.size()); + + bool rootDirectory = has_root_directory(p); + bool rootName = has_root_name(p); + + // Already absolute. + if ((rootName || is_style_posix(Style::native)) && rootDirectory) + return; + + // All the following conditions will need the current directory. + SmallString<128> current_dir; + current_directory.toVector(current_dir); + + // Relative path. Prepend the current directory. + if (!rootName && !rootDirectory) { + // Append path to the current directory. + append(current_dir, p); + // Set path to the result. 
+ path.swap(current_dir); + return; + } + + if (!rootName && rootDirectory) { + StringRef cdrn = root_name(current_dir); + SmallString<128> curDirRootName(cdrn.begin(), cdrn.end()); + append(curDirRootName, p); + // Set path to the result. + path.swap(curDirRootName); + return; + } + + if (rootName && !rootDirectory) { + StringRef pRootName = root_name(p); + StringRef bRootDirectory = root_directory(current_dir); + StringRef bRelativePath = relative_path(current_dir); + StringRef pRelativePath = relative_path(p); + + SmallString<128> res; + append(res, pRootName, bRootDirectory, bRelativePath, pRelativePath); + path.swap(res); + return; + } + + llvm_unreachable("All rootName and rootDirectory combinations should have " + "occurred above!"); +} + StringRef remove_leading_dotslash(StringRef Path, Style style) { // Remove leading "./" (or ".//" or "././" etc.) while (Path.size() > 2 && Path[0] == '.' && is_separator(Path[1], style)) { @@ -903,55 +952,6 @@ getPotentiallyUniqueTempFileName(const Twine &Prefix, StringRef Suffix, return createTemporaryFile(Prefix, Suffix, Dummy, ResultPath, FS_Name); } -void make_absolute(const Twine ¤t_directory, - SmallVectorImpl<char> &path) { - StringRef p(path.data(), path.size()); - - bool rootDirectory = path::has_root_directory(p); - bool rootName = path::has_root_name(p); - - // Already absolute. - if ((rootName || is_style_posix(Style::native)) && rootDirectory) - return; - - // All of the following conditions will need the current directory. - SmallString<128> current_dir; - current_directory.toVector(current_dir); - - // Relative path. Prepend the current directory. - if (!rootName && !rootDirectory) { - // Append path to the current directory. - path::append(current_dir, p); - // Set path to the result. 
- path.swap(current_dir); - return; - } - - if (!rootName && rootDirectory) { - StringRef cdrn = path::root_name(current_dir); - SmallString<128> curDirRootName(cdrn.begin(), cdrn.end()); - path::append(curDirRootName, p); - // Set path to the result. - path.swap(curDirRootName); - return; - } - - if (rootName && !rootDirectory) { - StringRef pRootName = path::root_name(p); - StringRef bRootDirectory = path::root_directory(current_dir); - StringRef bRelativePath = path::relative_path(current_dir); - StringRef pRelativePath = path::relative_path(p); - - SmallString<128> res; - path::append(res, pRootName, bRootDirectory, bRelativePath, pRelativePath); - path.swap(res); - return; - } - - llvm_unreachable("All rootName and rootDirectory combinations should have " - "occurred above!"); -} - std::error_code make_absolute(SmallVectorImpl<char> &path) { if (path::is_absolute(path)) return {}; @@ -960,7 +960,7 @@ std::error_code make_absolute(SmallVectorImpl<char> &path) { if (std::error_code ec = current_path(current_dir)) return ec; - make_absolute(current_dir, path); + path::make_absolute(current_dir, path); return {}; } diff --git a/llvm/lib/Support/ScopedPrinter.cpp b/llvm/lib/Support/ScopedPrinter.cpp index a17e397..efb6178 100644 --- a/llvm/lib/Support/ScopedPrinter.cpp +++ b/llvm/lib/Support/ScopedPrinter.cpp @@ -1,12 +1,17 @@ -#include "llvm/Support/ScopedPrinter.h" +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "llvm/Support/ScopedPrinter.h" #include "llvm/Support/Format.h" -using namespace llvm::support; +using namespace llvm; -namespace llvm { - -raw_ostream &operator<<(raw_ostream &OS, const HexNumber &Value) { +raw_ostream &llvm::operator<<(raw_ostream &OS, const HexNumber &Value) { OS << "0x" << utohexstr(Value.Value); return OS; } @@ -45,5 +50,3 @@ JSONScopedPrinter::JSONScopedPrinter( if (this->OuterScope) this->OuterScope->setPrinter(*this); } - -} // namespace llvm diff --git a/llvm/lib/Support/StringMap.cpp b/llvm/lib/Support/StringMap.cpp index 3432dc1..4aee30c 100644 --- a/llvm/lib/Support/StringMap.cpp +++ b/llvm/lib/Support/StringMap.cpp @@ -83,7 +83,7 @@ unsigned StringMapImpl::LookupBucketFor(StringRef Name, // Hash table unallocated so far? if (NumBuckets == 0) init(16); - if (shouldReverseIterate()) + if constexpr (shouldReverseIterate()) FullHashValue = ~FullHashValue; unsigned BucketNo = FullHashValue & (NumBuckets - 1); unsigned *HashTable = getHashTable(TheTable, NumBuckets); @@ -142,7 +142,7 @@ int StringMapImpl::FindKey(StringRef Key, uint32_t FullHashValue) const { #ifdef EXPENSIVE_CHECKS assert(FullHashValue == hash(Key)); #endif - if (shouldReverseIterate()) + if constexpr (shouldReverseIterate()) FullHashValue = ~FullHashValue; unsigned BucketNo = FullHashValue & (NumBuckets - 1); unsigned *HashTable = getHashTable(TheTable, NumBuckets); diff --git a/llvm/lib/Support/VirtualFileSystem.cpp b/llvm/lib/Support/VirtualFileSystem.cpp index 44d2ee7..c754b30 100644 --- a/llvm/lib/Support/VirtualFileSystem.cpp +++ b/llvm/lib/Support/VirtualFileSystem.cpp @@ -133,7 +133,7 @@ std::error_code FileSystem::makeAbsolute(SmallVectorImpl<char> &Path) const { if (!WorkingDir) return WorkingDir.getError(); - llvm::sys::fs::make_absolute(WorkingDir.get(), Path); + sys::path::make_absolute(WorkingDir.get(), 
Path); return {}; } @@ -300,7 +300,7 @@ private: if (!WD || !*WD) return Path; Path.toVector(Storage); - sys::fs::make_absolute(WD->get().Resolved, Storage); + sys::path::make_absolute(WD->get().Resolved, Storage); return Storage; } diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td index 1e30735..36c9cb6 100644 --- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td @@ -707,16 +707,14 @@ let Predicates = [HasSVE_or_SME] in { defm SDOT_ZZZ : sve_intx_dot<0b0, "sdot", AArch64sdot>; defm UDOT_ZZZ : sve_intx_dot<0b1, "udot", AArch64udot>; - let Predicates = [HasSVE_or_SME] in { - def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)), - (UDOT_ZZZ_BtoS $Acc, $MulLHS, $MulRHS)>; - def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)), - (SDOT_ZZZ_BtoS $Acc, $MulLHS, $MulRHS)>; - def : Pat<(nxv2i64 (partial_reduce_umla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), - (UDOT_ZZZ_HtoD $Acc, $MulLHS, $MulRHS)>; - def : Pat<(nxv2i64 (partial_reduce_smla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), - (SDOT_ZZZ_HtoD $Acc, $MulLHS, $MulRHS)>; - } // End HasSVE_or_SME + def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)), + (UDOT_ZZZ_BtoS $Acc, $MulLHS, $MulRHS)>; + def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)), + (SDOT_ZZZ_BtoS $Acc, $MulLHS, $MulRHS)>; + def : Pat<(nxv2i64 (partial_reduce_umla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), + (UDOT_ZZZ_HtoD $Acc, $MulLHS, $MulRHS)>; + def : Pat<(nxv2i64 (partial_reduce_smla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), + (SDOT_ZZZ_HtoD $Acc, $MulLHS, $MulRHS)>; defm SDOT_ZZZI : sve_intx_dot_by_indexed_elem<0b0, "sdot", int_aarch64_sve_sdot_lane>; defm UDOT_ZZZI : sve_intx_dot_by_indexed_elem<0b1, "udot", int_aarch64_sve_udot_lane>; @@ -3646,6 +3644,9 @@ let 
Predicates = [HasSVE_or_SME, HasMatMulInt8] in { defm USDOT_ZZZ : sve_int_dot_mixed<"usdot", AArch64usdot>; defm USDOT_ZZZI : sve_int_dot_mixed_indexed<0, "usdot", int_aarch64_sve_usdot_lane>; defm SUDOT_ZZZI : sve_int_dot_mixed_indexed<1, "sudot", int_aarch64_sve_sudot_lane>; + + def : Pat<(nxv4i32 (partial_reduce_sumla nxv4i32:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)), + (USDOT_ZZZ $Acc, $RHS, $LHS)>; } // End HasSVE_or_SME, HasMatMulInt8 let Predicates = [HasSVE, HasMatMulFP32] in { @@ -3752,6 +3753,19 @@ let Predicates = [HasSVE2_or_SME] in { defm UMLSLB_ZZZ : sve2_int_mla_long<0b10110, "umlslb", int_aarch64_sve_umlslb>; defm UMLSLT_ZZZ : sve2_int_mla_long<0b10111, "umlslt", int_aarch64_sve_umlslt>; + def : Pat<(nxv2i64 (partial_reduce_umla nxv2i64:$Acc, nxv4i32:$LHS, nxv4i32:$RHS)), + (UMLALT_ZZZ_D (UMLALB_ZZZ_D $Acc, $LHS, $RHS), $LHS, $RHS)>; + def : Pat<(nxv2i64 (partial_reduce_smla nxv2i64:$Acc, nxv4i32:$LHS, nxv4i32:$RHS)), + (SMLALT_ZZZ_D (SMLALB_ZZZ_D $Acc, $LHS, $RHS), $LHS, $RHS)>; + def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv8i16:$LHS, nxv8i16:$RHS)), + (UMLALT_ZZZ_S (UMLALB_ZZZ_S $Acc, $LHS, $RHS), $LHS, $RHS)>; + def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv8i16:$LHS, nxv8i16:$RHS)), + (SMLALT_ZZZ_S (SMLALB_ZZZ_S $Acc, $LHS, $RHS), $LHS, $RHS)>; + def : Pat<(nxv8i16 (partial_reduce_umla nxv8i16:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)), + (UMLALT_ZZZ_H (UMLALB_ZZZ_H $Acc, $LHS, $RHS), $LHS, $RHS)>; + def : Pat<(nxv8i16 (partial_reduce_smla nxv8i16:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)), + (SMLALT_ZZZ_H (SMLALB_ZZZ_H $Acc, $LHS, $RHS), $LHS, $RHS)>; + // SVE2 saturating multiply-add long (indexed) defm SQDMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0100, "sqdmlalb", int_aarch64_sve_sqdmlalb_lane>; defm SQDMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0101, "sqdmlalt", int_aarch64_sve_sqdmlalt_lane>; @@ -3880,19 +3894,6 @@ let Predicates = [HasSVE2_or_SME] in { def : Pat<(nxv8i16 (partial_reduce_smla nxv8i16:$Acc, nxv16i8:$Input, 
(nxv16i8 (splat_vector (i32 1))))), (SADDWT_ZZZ_H (SADDWB_ZZZ_H $Acc, $Input), $Input)>; - def : Pat<(nxv2i64 (partial_reduce_umla nxv2i64:$Acc, nxv4i32:$LHS, nxv4i32:$RHS)), - (UMLALT_ZZZ_D (UMLALB_ZZZ_D $Acc, $LHS, $RHS), $LHS, $RHS)>; - def : Pat<(nxv2i64 (partial_reduce_smla nxv2i64:$Acc, nxv4i32:$LHS, nxv4i32:$RHS)), - (SMLALT_ZZZ_D (SMLALB_ZZZ_D $Acc, $LHS, $RHS), $LHS, $RHS)>; - def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv8i16:$LHS, nxv8i16:$RHS)), - (UMLALT_ZZZ_S (UMLALB_ZZZ_S $Acc, $LHS, $RHS), $LHS, $RHS)>; - def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv8i16:$LHS, nxv8i16:$RHS)), - (SMLALT_ZZZ_S (SMLALB_ZZZ_S $Acc, $LHS, $RHS), $LHS, $RHS)>; - def : Pat<(nxv8i16 (partial_reduce_umla nxv8i16:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)), - (UMLALT_ZZZ_H (UMLALB_ZZZ_H $Acc, $LHS, $RHS), $LHS, $RHS)>; - def : Pat<(nxv8i16 (partial_reduce_smla nxv8i16:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)), - (SMLALT_ZZZ_H (SMLALB_ZZZ_H $Acc, $LHS, $RHS), $LHS, $RHS)>; - // SVE2 integer multiply long defm SQDMULLB_ZZZ : sve2_wide_int_arith_long<0b11000, "sqdmullb", int_aarch64_sve_sqdmullb>; defm SQDMULLT_ZZZ : sve2_wide_int_arith_long<0b11001, "sqdmullt", int_aarch64_sve_sqdmullt>; @@ -4200,11 +4201,6 @@ let Predicates = [HasSVEAES2, HasNonStreamingSVE_or_SSVE_AES] in { def PMULL_2ZZZ_Q : sve_crypto_pmull_multi<"pmull">; } -let Predicates = [HasSVE_or_SME, HasMatMulInt8] in { - def : Pat<(nxv4i32 (partial_reduce_sumla nxv4i32:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)), - (USDOT_ZZZ $Acc, $RHS, $LHS)>; - } // End HasSVE_or_SME, HasMatMulInt8 - //===----------------------------------------------------------------------===// // SME or SVE2.1 instructions //===----------------------------------------------------------------------===// @@ -4238,12 +4234,10 @@ defm UDOT_ZZZ_HtoS : sve2p1_two_way_dot_vv<"udot", 0b1, int_aarch64_sve_udot_x2 defm SDOT_ZZZI_HtoS : sve2p1_two_way_dot_vvi<"sdot", 0b0, int_aarch64_sve_sdot_lane_x2>; defm UDOT_ZZZI_HtoS : sve2p1_two_way_dot_vvi<"udot", 
0b1, int_aarch64_sve_udot_lane_x2>; -let Predicates = [HasSVE2p1_or_SME2] in { - def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), - (UDOT_ZZZ_HtoS $Acc, $MulLHS, $MulRHS)>; - def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), - (SDOT_ZZZ_HtoS $Acc, $MulLHS, $MulRHS)>; -} // End HasSVE2p1_or_SME2 +def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), + (UDOT_ZZZ_HtoS $Acc, $MulLHS, $MulRHS)>; +def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), + (SDOT_ZZZ_HtoS $Acc, $MulLHS, $MulRHS)>; defm SQCVTN_Z2Z_StoH : sve2p1_multi_vec_extract_narrow<"sqcvtn", 0b00, int_aarch64_sve_sqcvtn_x2>; defm UQCVTN_Z2Z_StoH : sve2p1_multi_vec_extract_narrow<"uqcvtn", 0b01, int_aarch64_sve_uqcvtn_x2>; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp index 92a587b..280fbe2 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp @@ -1384,6 +1384,11 @@ void AMDGPUPassConfig::addCodeGenPrepare() { if (TM->getTargetTriple().isAMDGCN() && EnableLowerKernelArguments) addPass(createAMDGPULowerKernelArgumentsPass()); + TargetPassConfig::addCodeGenPrepare(); + + if (isPassEnabled(EnableLoadStoreVectorizer)) + addPass(createLoadStoreVectorizerPass()); + if (TM->getTargetTriple().isAMDGCN()) { // This lowering has been placed after codegenprepare to take advantage of // address mode matching (which is why it isn't put with the LDS lowerings). @@ -1392,15 +1397,6 @@ void AMDGPUPassConfig::addCodeGenPrepare() { // but has been put before switch lowering and CFG flattening so that those // passes can run on the more optimized control flow this pass creates in // many cases. - // - // FIXME: This should ideally be put after the LoadStoreVectorizer. 
- // However, due to some annoying facts about ResourceUsageAnalysis, - // (especially as exercised in the resource-usage-dead-function test), - // we need all the function passes codegenprepare all the way through - // said resource usage analysis to run on the call graph produced - // before codegenprepare runs (because codegenprepare will knock some - // nodes out of the graph, which leads to function-level passes not - // being run on them, which causes crashes in the resource usage analysis). addPass(createAMDGPULowerBufferFatPointersPass()); addPass(createAMDGPULowerIntrinsicsLegacyPass()); // In accordance with the above FIXME, manually force all the @@ -1408,11 +1404,6 @@ void AMDGPUPassConfig::addCodeGenPrepare() { addPass(new DummyCGSCCPass()); } - TargetPassConfig::addCodeGenPrepare(); - - if (isPassEnabled(EnableLoadStoreVectorizer)) - addPass(createLoadStoreVectorizerPass()); - // LowerSwitch pass may introduce unreachable blocks that can // cause unexpected behavior for subsequent passes. Placing it // here seems better that these blocks would get cleaned up by @@ -2125,6 +2116,11 @@ void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const { if (EnableLowerKernelArguments) addPass(AMDGPULowerKernelArgumentsPass(TM)); + Base::addCodeGenPrepare(addPass); + + if (isPassEnabled(EnableLoadStoreVectorizer)) + addPass(LoadStoreVectorizerPass()); + // This lowering has been placed after codegenprepare to take advantage of // address mode matching (which is why it isn't put with the LDS lowerings). // It could be placed anywhere before uniformity annotations (an analysis @@ -2132,25 +2128,11 @@ void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const { // but has been put before switch lowering and CFG flattening so that those // passes can run on the more optimized control flow this pass creates in // many cases. - // - // FIXME: This should ideally be put after the LoadStoreVectorizer. 
- // However, due to some annoying facts about ResourceUsageAnalysis, - // (especially as exercised in the resource-usage-dead-function test), - // we need all the function passes codegenprepare all the way through - // said resource usage analysis to run on the call graph produced - // before codegenprepare runs (because codegenprepare will knock some - // nodes out of the graph, which leads to function-level passes not - // being run on them, which causes crashes in the resource usage analysis). addPass(AMDGPULowerBufferFatPointersPass(TM)); addPass.requireCGSCCOrder(); addPass(AMDGPULowerIntrinsicsPass(TM)); - Base::addCodeGenPrepare(addPass); - - if (isPassEnabled(EnableLoadStoreVectorizer)) - addPass(LoadStoreVectorizerPass()); - // LowerSwitch pass may introduce unreachable blocks that can cause unexpected // behavior for subsequent passes. Placing it here seems better that these // blocks would get cleaned up by UnreachableBlockElim inserted next in the diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp index fab78a9..bdc0810 100644 --- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp +++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp @@ -29,6 +29,7 @@ #include "SIMachineFunctionInfo.h" #include "Utils/AMDGPUBaseInfo.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/CodeGen/CalcSpillWeights.h" #include "llvm/CodeGen/RegisterClassInfo.h" #include "llvm/MC/LaneBitmask.h" #include "llvm/Support/ErrorHandling.h" @@ -1633,64 +1634,6 @@ void GCNSchedStage::revertScheduling() { DAG.Regions[RegionIdx] = std::pair(DAG.RegionBegin, DAG.RegionEnd); } -bool PreRARematStage::allUsesAvailableAt(const MachineInstr *InstToRemat, - SlotIndex OriginalIdx, - SlotIndex RematIdx) const { - - LiveIntervals *LIS = DAG.LIS; - MachineRegisterInfo &MRI = DAG.MRI; - OriginalIdx = OriginalIdx.getRegSlot(true); - RematIdx = std::max(RematIdx, RematIdx.getRegSlot(true)); - for (const MachineOperand &MO : InstToRemat->operands()) { - if 
(!MO.isReg() || !MO.getReg() || !MO.readsReg()) - continue; - - if (!MO.getReg().isVirtual()) { - // Do not attempt to reason about PhysRegs - // TODO: better analysis of PhysReg livness - if (!DAG.MRI.isConstantPhysReg(MO.getReg()) && - !DAG.TII->isIgnorableUse(MO)) - return false; - - // Constant PhysRegs and IgnorableUses are okay - continue; - } - - LiveInterval &LI = LIS->getInterval(MO.getReg()); - const VNInfo *OVNI = LI.getVNInfoAt(OriginalIdx); - assert(OVNI); - - // Don't allow rematerialization immediately after the original def. - // It would be incorrect if InstToRemat redefines the register. - // See PR14098. - if (SlotIndex::isSameInstr(OriginalIdx, RematIdx)) - return false; - - if (OVNI != LI.getVNInfoAt(RematIdx)) - return false; - - // Check that subrange is live at RematIdx. - if (LI.hasSubRanges()) { - const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo(); - unsigned SubReg = MO.getSubReg(); - LaneBitmask LM = SubReg ? TRI->getSubRegIndexLaneMask(SubReg) - : MRI.getMaxLaneMaskForVReg(MO.getReg()); - for (LiveInterval::SubRange &SR : LI.subranges()) { - if ((SR.LaneMask & LM).none()) - continue; - if (!SR.liveAt(RematIdx)) - return false; - - // Early exit if all used lanes are checked. No need to continue. - LM &= ~SR.LaneMask; - if (LM.none()) - break; - } - } - } - return true; -} - bool PreRARematStage::canIncreaseOccupancyOrReduceSpill() { const Function &F = MF.getFunction(); @@ -1812,9 +1755,9 @@ bool PreRARematStage::canIncreaseOccupancyOrReduceSpill() { // Do not rematerialize an instruction it it uses registers that aren't // available at its use. This ensures that we are not extending any live // range while rematerializing. 
- SlotIndex DefIdx = DAG.LIS->getInstructionIndex(DefMI); SlotIndex UseIdx = DAG.LIS->getInstructionIndex(*UseMI).getRegSlot(true); - if (!allUsesAvailableAt(&DefMI, DefIdx, UseIdx)) + if (!VirtRegAuxInfo::allUsesAvailableAt(&DefMI, UseIdx, *DAG.LIS, DAG.MRI, + *DAG.TII)) continue; REMAT_DEBUG(dbgs() << "Region " << I << ": remat instruction " << DefMI); diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h index 06b9b64..8ea4267 100644 --- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h +++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h @@ -496,12 +496,6 @@ private: /// stage to their pre-stage values. void finalizeGCNSchedStage() override; - /// \p Returns true if all the uses in \p InstToRemat defined at \p - /// OriginalIdx are live at \p RematIdx. This only checks liveness of virtual - /// reg uses. - bool allUsesAvailableAt(const MachineInstr *InstToRemat, - SlotIndex OriginalIdx, SlotIndex RematIdx) const; - public: bool initGCNSchedStage() override; diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td index 77df721..54f57e0 100644 --- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td +++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td @@ -314,9 +314,10 @@ let SubtargetPredicate = HasGFX950Insts, OtherPredicates = [HasBF16ConversionIns defm V_CVT_F32_BF16 : VOP1Inst_t16 <"v_cvt_f32_bf16", VOP_F32_BF16>; } let SubtargetPredicate = isGFX1250Plus, OtherPredicates = [HasBF16ConversionInsts] in { - defm V_CVT_F32_BF16_gfx1250 : VOP1Inst_t16_with_profiles <"v_cvt_f32_bf16_gfx1250", VOP_F32_BF16, - VOPProfile_CVT_F32_BF16_gfx1250_t16, - VOPProfile_CVT_F32_BF16_gfx1250_fake16>; + let True16Predicate = UseRealTrue16Insts in + defm V_CVT_F32_BF16_gfx1250_t16 : VOP1Inst <"V_CVT_F32_BF16_gfx1250_t16", VOPProfile_CVT_F32_BF16_gfx1250_t16>; + let True16Predicate = UseFakeTrue16Insts in + defm V_CVT_F32_BF16_gfx1250_fake16 : VOP1Inst <"V_CVT_F32_BF16_gfx1250_fake16", 
VOPProfile_CVT_F32_BF16_gfx1250_fake16>; } let ReadsModeReg = 0, mayRaiseFPException = 0 in { @@ -899,6 +900,7 @@ class VOP1_DPP16_Gen<bits<8> op, VOP1_DPP_Pseudo ps, GFXGen Gen, VOPProfile p = let DecoderNamespace = Gen.DecoderNamespace; let OtherPredicates = !listconcat(ps.OtherPredicates, !if(p.HasExt64BitDPP, [HasDPALU_DPP], [])); + let True16Predicate = ps.True16Predicate; } class VOP1_DPP8<bits<8> op, VOP1_Pseudo ps, VOPProfile p = ps.Pfl> : @@ -921,6 +923,7 @@ class VOP1_DPP8_Gen<bits<8> op, VOP1_Pseudo ps, GFXGen Gen, VOPProfile p = ps.Pf VOP1_DPP8<op, ps, p> { let AssemblerPredicate = Gen.AssemblerPredicate; let DecoderNamespace = Gen.DecoderNamespace; + let True16Predicate = ps.True16Predicate; } //===----------------------------------------------------------------------===// @@ -1149,7 +1152,7 @@ defm V_TANH_F16 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x01f>; defm V_PERMLANE16_SWAP_B32 : VOP1_Real_OpSelIsDPP_gfx1250<0x049>; defm V_TANH_BF16 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x04a>; defm V_PRNG_B32 : VOP1_Real_FULL<GFX1250Gen, 0x04b>; -defm V_CVT_F32_BF16 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x072, "v_cvt_f32_bf16", "V_CVT_F32_BF16_gfx1250">; +defm V_CVT_F32_BF16_gfx1250 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x072, "v_cvt_f32_bf16">; defm V_SAT_PK4_I4_I8 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x073>; defm V_SAT_PK4_U4_U8 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x074>; defm V_CVT_PK_F16_FP8 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x075>; diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp index d4d9e54..4105618 100644 --- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp +++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp @@ -46,6 +46,8 @@ private: MachineBasicBlock::iterator &NextMBBI); bool expandCCOp(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, MachineBasicBlock::iterator &NextMBBI); + bool expandCCOpToCMov(MachineBasicBlock &MBB, + MachineBasicBlock::iterator 
MBBI); bool expandVMSET_VMCLR(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned Opcode); bool expandMV_FPR16INX(MachineBasicBlock &MBB, @@ -178,6 +180,9 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB, bool RISCVExpandPseudo::expandCCOp(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, MachineBasicBlock::iterator &NextMBBI) { + // First try expanding to a Conditional Move rather than a branch+mv + if (expandCCOpToCMov(MBB, MBBI)) + return true; MachineFunction *MF = MBB.getParent(); MachineInstr &MI = *MBBI; @@ -277,6 +282,86 @@ bool RISCVExpandPseudo::expandCCOp(MachineBasicBlock &MBB, return true; } +bool RISCVExpandPseudo::expandCCOpToCMov(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI) { + MachineInstr &MI = *MBBI; + DebugLoc DL = MI.getDebugLoc(); + + if (MI.getOpcode() != RISCV::PseudoCCMOVGPR && + MI.getOpcode() != RISCV::PseudoCCMOVGPRNoX0) + return false; + + if (!STI->hasVendorXqcicm()) + return false; + + // FIXME: Would be wonderful to support LHS=X0, but not very easy. 
+ if (MI.getOperand(1).getReg() == RISCV::X0 || + MI.getOperand(4).getReg() == RISCV::X0 || + MI.getOperand(5).getReg() == RISCV::X0) + return false; + + auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm()); + + unsigned CMovOpcode, CMovIOpcode; + switch (CC) { + default: + llvm_unreachable("Unhandled CC"); + case RISCVCC::COND_EQ: + CMovOpcode = RISCV::QC_MVEQ; + CMovIOpcode = RISCV::QC_MVEQI; + break; + case RISCVCC::COND_NE: + CMovOpcode = RISCV::QC_MVNE; + CMovIOpcode = RISCV::QC_MVNEI; + break; + case RISCVCC::COND_LT: + CMovOpcode = RISCV::QC_MVLT; + CMovIOpcode = RISCV::QC_MVLTI; + break; + case RISCVCC::COND_GE: + CMovOpcode = RISCV::QC_MVGE; + CMovIOpcode = RISCV::QC_MVGEI; + break; + case RISCVCC::COND_LTU: + CMovOpcode = RISCV::QC_MVLTU; + CMovIOpcode = RISCV::QC_MVLTUI; + break; + case RISCVCC::COND_GEU: + CMovOpcode = RISCV::QC_MVGEU; + CMovIOpcode = RISCV::QC_MVGEUI; + break; + } + + if (MI.getOperand(2).getReg() == RISCV::X0) { + // $dst = PseudoCCMOVGPR $lhs, X0, $cc, $falsev (=$dst), $truev + // $dst = PseudoCCMOVGPRNoX0 $lhs, X0, $cc, $falsev (=$dst), $truev + // => + // $dst = QC_MVccI $falsev (=$dst), $lhs, 0, $truev + BuildMI(MBB, MBBI, DL, TII->get(CMovIOpcode)) + .addDef(MI.getOperand(0).getReg()) + .addReg(MI.getOperand(4).getReg()) + .addReg(MI.getOperand(1).getReg()) + .addImm(0) + .addReg(MI.getOperand(5).getReg()); + + MI.eraseFromParent(); + return true; + } + + // $dst = PseudoCCMOVGPR $lhs, $rhs, $cc, $falsev (=$dst), $truev + // $dst = PseudoCCMOVGPRNoX0 $lhs, $rhs, $cc, $falsev (=$dst), $truev + // => + // $dst = QC_MVcc $falsev (=$dst), $lhs, $rhs, $truev + BuildMI(MBB, MBBI, DL, TII->get(CMovOpcode)) + .addDef(MI.getOperand(0).getReg()) + .addReg(MI.getOperand(4).getReg()) + .addReg(MI.getOperand(1).getReg()) + .addReg(MI.getOperand(2).getReg()) + .addReg(MI.getOperand(5).getReg()); + MI.eraseFromParent(); + return true; +} + bool RISCVExpandPseudo::expandVMSET_VMCLR(MachineBasicBlock &MBB, 
MachineBasicBlock::iterator MBBI, unsigned Opcode) { diff --git a/llvm/lib/Target/RISCV/RISCVGISel.td b/llvm/lib/Target/RISCV/RISCVGISel.td index 2e5f30f..19d5aff 100644 --- a/llvm/lib/Target/RISCV/RISCVGISel.td +++ b/llvm/lib/Target/RISCV/RISCVGISel.td @@ -109,8 +109,9 @@ def : LdPat<extloadi8, LBU, i16>; // Prefer unsigned due to no c.lb in Zcb. def : StPat<truncstorei8, SB, GPR, i16>; let Predicates = [HasAtomicLdSt] in { - def : LdPat<atomic_load_aext_8, LB, i16>; - def : LdPat<atomic_load_nonext_16, LH, i16>; + // Prefer unsigned due to no c.lb in Zcb. + def : LdPat<atomic_load_aext_8, LBU, i16>; + def : LdPat<atomic_load_nonext_16, LH, i16>; def : StPat<atomic_store_8, SB, GPR, i16>; def : StPat<atomic_store_16, SH, GPR, i16>; diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp index 90e1c47a..6a6ead2 100644 --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -70,6 +70,10 @@ static unsigned getSEWOpNum(const MachineInstr &MI) { return RISCVII::getSEWOpNum(MI.getDesc()); } +static unsigned getVecPolicyOpNum(const MachineInstr &MI) { + return RISCVII::getVecPolicyOpNum(MI.getDesc()); +} + /// Get the EEW for a load or store instruction. Return std::nullopt if MI is /// not a load or store which ignores SEW. static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) { @@ -986,7 +990,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const { // If there is a policy operand, use it. 
if (RISCVII::hasVecPolicyOp(TSFlags)) { - const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1); + const MachineOperand &Op = MI.getOperand(getVecPolicyOpNum(MI)); uint64_t Policy = Op.getImm(); assert(Policy <= (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC) && diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td index 59f5aeb..99992d1 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td @@ -165,10 +165,11 @@ class seq_cst_store<PatFrag base> // any ordering. This is necessary because AtomicExpandPass has added fences to // atomic load/stores and changed them to unordered ones. let Predicates = [HasAtomicLdSt] in { - def : LdPat<relaxed_load<atomic_load_asext_8>, LB>; + // Use unsigned for aext due to no c.lb in Zcb. + def : LdPat<relaxed_load<atomic_load_sext_8>, LB>; + def : LdPat<relaxed_load<atomic_load_azext_8>, LBU>; def : LdPat<relaxed_load<atomic_load_asext_16>, LH>; - def : LdPat<relaxed_load<atomic_load_zext_8>, LBU>; - def : LdPat<relaxed_load<atomic_load_zext_16>, LHU>; + def : LdPat<relaxed_load<atomic_load_zext_16>, LHU>; def : StPat<relaxed_store<atomic_store_8>, SB, GPR, XLenVT>; def : StPat<relaxed_store<atomic_store_16>, SH, GPR, XLenVT>; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td index 5407868..efdbd12 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td @@ -1350,6 +1350,10 @@ class QCIMVCCIPat<CondCode Cond, QCIMVCCI Inst, DAGOperand InTyImm> : Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rs1), InTyImm:$imm, Cond, (i32 GPRNoX0:$rs3), (i32 GPRNoX0:$rd))), (Inst GPRNoX0:$rd, GPRNoX0:$rs1, InTyImm:$imm, GPRNoX0:$rs3)>; +class QCIMVCCIZeroPat<CondCode Cond, QCIMVCCI Inst> + : Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rs1), (i32 0), Cond, (i32 GPRNoX0:$rs3), (i32 GPRNoX0:$rd))), + (Inst GPRNoX0:$rd, GPRNoX0:$rs1, 0, 
GPRNoX0:$rs3)>; + class QCISELECTCCIPat<CondCode Cond, QCISELECTCCI Inst> : Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rd), simm5:$imm, Cond, (i32 GPRNoX0:$rs2), (i32 GPRNoX0:$rs3))), (Inst GPRNoX0:$rd, simm5:$imm, GPRNoX0:$rs2, GPRNoX0:$rs3)>; @@ -1538,14 +1542,7 @@ def: Pat<(i32 (ctlz (not (i32 GPR:$rs1)))), (QC_CLO GPR:$rs1)>; let Predicates = [HasVendorXqciint, IsRV32] in def : Pat<(riscv_mileaveret_glue), (QC_C_MILEAVERET)>; -let Predicates = [HasVendorXqcicm, IsRV32] in { -// (SELECT X, Y, Z) is canonicalised to `(riscv_selectcc x, 0, NE, y, z)`. -// This exists to prioritise over the `Select_GPR_Using_CC_GPR` pattern. -def : Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rs1), (i32 0), SETNE, (i32 GPRNoX0:$rs3), (i32 GPRNoX0:$rd))), - (QC_MVNEI GPRNoX0:$rd, GPRNoX0:$rs1, 0, GPRNoX0:$rs3)>; -def : Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rs1), (i32 0), SETEQ, (i32 GPRNoX0:$rs3), (i32 GPRNoX0:$rd))), - (QC_MVEQI GPRNoX0:$rd, GPRNoX0:$rs1, 0, GPRNoX0:$rs3)>; - +let Predicates = [HasVendorXqcicm, NoShortForwardBranchOpt, IsRV32] in { def : QCIMVCCPat<SETEQ, QC_MVEQ>; def : QCIMVCCPat<SETNE, QC_MVNE>; def : QCIMVCCPat<SETLT, QC_MVLT>; @@ -1553,12 +1550,24 @@ def : QCIMVCCPat<SETULT, QC_MVLTU>; def : QCIMVCCPat<SETGE, QC_MVGE>; def : QCIMVCCPat<SETUGE, QC_MVGEU>; -def : QCIMVCCIPat<SETEQ, QC_MVEQI, simm5>; -def : QCIMVCCIPat<SETNE, QC_MVNEI, simm5>; -def : QCIMVCCIPat<SETLT, QC_MVLTI, simm5>; -def : QCIMVCCIPat<SETULT, QC_MVLTUI, uimm5>; -def : QCIMVCCIPat<SETGE, QC_MVGEI, simm5>; -def : QCIMVCCIPat<SETUGE, QC_MVGEUI, uimm5>; +// These exist to prioritise over the `Select_GPR_Using_CC_GPR` pattern for X0. 
+def : QCIMVCCIZeroPat<SETEQ, QC_MVEQI>; +def : QCIMVCCIZeroPat<SETNE, QC_MVNEI>; +def : QCIMVCCIZeroPat<SETLT, QC_MVLTI>; +def : QCIMVCCIZeroPat<SETULT, QC_MVLTUI>; +def : QCIMVCCIZeroPat<SETGE, QC_MVGEI>; +def : QCIMVCCIZeroPat<SETUGE, QC_MVGEUI>; +} + +let Predicates = [HasVendorXqcicm, IsRV32] in { +// These all use *imm5nonzero because we want to use PseudoCCMOVGPR with X0 when SFB is enabled. +// When SFB is not enabled, the `QCIMVCCIZeroPat`s above will be used if RHS=0. +def : QCIMVCCIPat<SETEQ, QC_MVEQI, simm5nonzero>; +def : QCIMVCCIPat<SETNE, QC_MVNEI, simm5nonzero>; +def : QCIMVCCIPat<SETLT, QC_MVLTI, simm5nonzero>; +def : QCIMVCCIPat<SETULT, QC_MVLTUI, uimm5nonzero>; +def : QCIMVCCIPat<SETGE, QC_MVGEI, simm5nonzero>; +def : QCIMVCCIPat<SETUGE, QC_MVGEUI, uimm5nonzero>; } let Predicates = [HasVendorXqcicli, IsRV32] in { diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp index d4124ae..ee25f69 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp @@ -3139,8 +3139,8 @@ bool RISCVTTIImpl::isProfitableToSinkOperands( bool IsVPSplat = match(Op, m_Intrinsic<Intrinsic::experimental_vp_splat>( m_Value(), m_Value(), m_Value())); if (!IsVPSplat && - !match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()), - m_Undef(), m_ZeroMask()))) + !match(Op, m_Shuffle(m_InsertElt(m_Value(), m_Value(), m_ZeroInt()), + m_Value(), m_ZeroMask()))) continue; // Don't sink i1 splats. 
diff --git a/llvm/lib/Target/TargetMachine.cpp b/llvm/lib/Target/TargetMachine.cpp index ad7e503..cf85691 100644 --- a/llvm/lib/Target/TargetMachine.cpp +++ b/llvm/lib/Target/TargetMachine.cpp @@ -27,7 +27,7 @@ #include "llvm/Target/TargetLoweringObjectFile.h" using namespace llvm; -cl::opt<bool> NoKernelInfoEndLTO( +cl::opt<bool> llvm::NoKernelInfoEndLTO( "no-kernel-info-end-lto", cl::desc("remove the kernel-info pass at the end of the full LTO pipeline"), cl::init(false), cl::Hidden); diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index cd04ff5..34854e4 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -44615,8 +44615,11 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode( APInt DemandedMask = OriginalDemandedBits << ShAmt; - // If we just want the sign bit then we don't need to shift it. - if (OriginalDemandedBits.isSignMask()) + // If we only want bits that already match the signbit then we don't need + // to shift. 
+ unsigned NumHiDemandedBits = BitWidth - OriginalDemandedBits.countr_zero(); + if (TLO.DAG.ComputeNumSignBits(Op0, OriginalDemandedElts, Depth + 1) >= + NumHiDemandedBits) return TLO.CombineTo(Op, Op0); // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1 @@ -45169,6 +45172,18 @@ bool X86TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode( case X86ISD::Wrapper: case X86ISD::WrapperRIP: return true; + case X86ISD::PACKSS: + case X86ISD::PACKUS: { + APInt DemandedLHS, DemandedRHS; + getPackDemandedElts(Op.getSimpleValueType(), DemandedElts, DemandedLHS, + DemandedRHS); + return (!DemandedLHS || + DAG.isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), DemandedLHS, + PoisonOnly, Depth + 1)) && + (!DemandedRHS || + DAG.isGuaranteedNotToBeUndefOrPoison(Op.getOperand(1), DemandedRHS, + PoisonOnly, Depth + 1)); + } case X86ISD::INSERTPS: case X86ISD::BLENDI: case X86ISD::PSHUFB: @@ -45239,6 +45254,10 @@ bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode( case X86ISD::BLENDI: case X86ISD::BLENDV: return false; + // SSE packs. + case X86ISD::PACKSS: + case X86ISD::PACKUS: + return false; // SSE target shuffles. 
case X86ISD::INSERTPS: case X86ISD::PSHUFB: diff --git a/llvm/lib/Transforms/IPO/FunctionImport.cpp b/llvm/lib/Transforms/IPO/FunctionImport.cpp index 83aa7de..28ee444 100644 --- a/llvm/lib/Transforms/IPO/FunctionImport.cpp +++ b/llvm/lib/Transforms/IPO/FunctionImport.cpp @@ -72,6 +72,7 @@ STATISTIC(NumImportedModules, "Number of modules imported from"); STATISTIC(NumDeadSymbols, "Number of dead stripped symbols in index"); STATISTIC(NumLiveSymbols, "Number of live symbols in index"); +namespace llvm { cl::opt<bool> ForceImportAll("force-import-all", cl::init(false), cl::Hidden, cl::desc("Import functions with noinline attribute")); @@ -185,9 +186,8 @@ static cl::opt<bool> CtxprofMoveRootsToOwnModule( extern cl::list<GlobalValue::GUID> MoveSymbolGUID; -namespace llvm { extern cl::opt<bool> EnableMemProfContextDisambiguation; -} +} // end namespace llvm // Load lazily a module from \p FileName in \p Context. static std::unique_ptr<Module> loadFile(const std::string &FileName, diff --git a/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp b/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp index 4f53738..150a2dc 100644 --- a/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp +++ b/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp @@ -28,10 +28,13 @@ using namespace llvm; STATISTIC(NumSpecsCreated, "Number of specializations created"); +namespace llvm { + static cl::opt<bool> ForceSpecialization( - "force-specialization", cl::init(false), cl::Hidden, cl::desc( - "Force function specialization for every call site with a constant " - "argument")); + "force-specialization", cl::init(false), cl::Hidden, + cl::desc( + "Force function specialization for every call site with a constant " + "argument")); static cl::opt<unsigned> MaxClones( "funcspec-max-clones", cl::init(3), cl::Hidden, cl::desc( @@ -91,6 +94,8 @@ static cl::opt<bool> SpecializeLiteralConstant( extern cl::opt<bool> ProfcheckDisableMetadataFixes; +} // end namespace llvm + bool 
InstCostVisitor::canEliminateSuccessor(BasicBlock *BB, BasicBlock *Succ) const { unsigned I = 0; diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp index 15f4d76..c4f1b68 100644 --- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp +++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp @@ -214,11 +214,12 @@ static cl::opt<bool> MemProfRequireDefinitionForPromotion( "memprof-require-definition-for-promotion", cl::init(false), cl::Hidden, cl::desc( "Require target function definition when promoting indirect calls")); -} // namespace llvm extern cl::opt<bool> MemProfReportHintedSizes; extern cl::opt<unsigned> MinClonedColdBytePercent; +} // namespace llvm + namespace { /// CRTP base for graphs built from either IR or ThinLTO summary index. /// diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp index 5bc7e34..e39e311 100644 --- a/llvm/lib/Transforms/IPO/SampleProfile.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp @@ -116,6 +116,8 @@ STATISTIC( NumCSInlinedHitGrowthLimit, "Number of functions with FDO inline stopped due to growth size limit"); +namespace llvm { + // Command line option to specify the file to read samples from. This is // mainly used for debugging. 
static cl::opt<std::string> SampleProfileFile( @@ -198,7 +200,6 @@ static cl::opt<bool> DisableSampleLoaderInlining( "pass, and merge (or scale) profiles (as configured by " "--sample-profile-merge-inlinee).")); -namespace llvm { cl::opt<bool> SortProfiledSCC("sort-profiled-scc-member", cl::init(true), cl::Hidden, cl::desc("Sort profiled recursion by edge weights.")); @@ -1664,8 +1665,9 @@ void SampleProfileLoader::generateMDProfMetadata(Function &F) { else if (OverwriteExistingWeights) I.setMetadata(LLVMContext::MD_prof, nullptr); } else if (!isa<IntrinsicInst>(&I)) { - setBranchWeights(I, {static_cast<uint32_t>(BlockWeights[BB])}, - /*IsExpected=*/false); + setBranchWeights( + I, ArrayRef<uint32_t>{static_cast<uint32_t>(BlockWeights[BB])}, + /*IsExpected=*/false); } } } else if (OverwriteExistingWeights || ProfileSampleBlockAccurate) { @@ -1676,7 +1678,8 @@ void SampleProfileLoader::generateMDProfMetadata(Function &F) { if (cast<CallBase>(I).isIndirectCall()) { I.setMetadata(LLVMContext::MD_prof, nullptr); } else { - setBranchWeights(I, {uint32_t(0)}, /*IsExpected=*/false); + setBranchWeights(I, ArrayRef<uint32_t>{uint32_t(0)}, + /*IsExpected=*/false); } } } diff --git a/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp b/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp index 093a39e..70b8614 100644 --- a/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp @@ -23,6 +23,8 @@ using namespace sampleprof; #define DEBUG_TYPE "sample-profile-matcher" +namespace llvm { + static cl::opt<unsigned> FuncProfileSimilarityThreshold( "func-profile-similarity-threshold", cl::Hidden, cl::init(80), cl::desc("Consider a profile matches a function if the similarity of their " @@ -55,6 +57,8 @@ static cl::opt<unsigned> SalvageStaleProfileMaxCallsites( cl::desc("The maximum number of callsites in a function, above which stale " "profile matching will be skipped.")); +} // end namespace llvm + void 
SampleProfileMatcher::findIRAnchors(const Function &F, AnchorMap &IRAnchors) const { // For inlined code, recover the original callsite and callee by finding the diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp index 09bffa7..ac41fdd 100644 --- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp +++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp @@ -120,6 +120,8 @@ STATISTIC(NumVirtConstProp1Bit, "Number of 1 bit virtual constant propagations"); STATISTIC(NumVirtConstProp, "Number of virtual constant propagations"); +namespace llvm { + static cl::opt<PassSummaryAction> ClSummaryAction( "wholeprogramdevirt-summary-action", cl::desc("What to do with the summary when running this pass"), @@ -175,6 +177,8 @@ static cl::list<std::string> extern cl::opt<bool> ProfcheckDisableMetadataFixes; +} // end namespace llvm + /// With Clang, a pure virtual class's deleting destructor is emitted as a /// `llvm.trap` intrinsic followed by an unreachable IR instruction. In the /// context of whole program devirtualization, the deleting destructor of a pure diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp index 4b7793f..9b272c4 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp @@ -3080,6 +3080,13 @@ InstCombinerImpl::convertOrOfShiftsToFunnelShift(Instruction &Or) { assert(ZextLowShlAmt->uge(HighSize) && ZextLowShlAmt->ule(Width - LowSize) && "Invalid concat"); + // We cannot reuse the result if it may produce poison. + // Drop poison generating flags in the expression tree. 
+ // Or + cast<Instruction>(U)->dropPoisonGeneratingFlags(); + // Shl + cast<Instruction>(X)->dropPoisonGeneratingFlags(); + FShiftArgs = {U, U, ConstantInt::get(Or0->getType(), *ZextHighShlAmt)}; break; } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp index 6ef3066..18a45c6 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp @@ -319,20 +319,20 @@ Instruction *InstCombinerImpl::foldBitcastExtElt(ExtractElementInst &Ext) { return nullptr; } -/// Find elements of V demanded by UserInstr. -static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr) { +/// Find elements of V demanded by UserInstr. If returns false, we were not able +/// to determine all elements. +static bool findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr, + APInt &UnionUsedElts) { unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements(); - // Conservatively assume that all elements are needed. 
- APInt UsedElts(APInt::getAllOnes(VWidth)); - switch (UserInstr->getOpcode()) { case Instruction::ExtractElement: { ExtractElementInst *EEI = cast<ExtractElementInst>(UserInstr); assert(EEI->getVectorOperand() == V); ConstantInt *EEIIndexC = dyn_cast<ConstantInt>(EEI->getIndexOperand()); if (EEIIndexC && EEIIndexC->getValue().ult(VWidth)) { - UsedElts = APInt::getOneBitSet(VWidth, EEIIndexC->getZExtValue()); + UnionUsedElts.setBit(EEIIndexC->getZExtValue()); + return true; } break; } @@ -341,23 +341,23 @@ static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr) { unsigned MaskNumElts = cast<FixedVectorType>(UserInstr->getType())->getNumElements(); - UsedElts = APInt(VWidth, 0); - for (unsigned i = 0; i < MaskNumElts; i++) { - unsigned MaskVal = Shuffle->getMaskValue(i); + for (auto I : llvm::seq(MaskNumElts)) { + unsigned MaskVal = Shuffle->getMaskValue(I); if (MaskVal == -1u || MaskVal >= 2 * VWidth) continue; if (Shuffle->getOperand(0) == V && (MaskVal < VWidth)) - UsedElts.setBit(MaskVal); + UnionUsedElts.setBit(MaskVal); if (Shuffle->getOperand(1) == V && ((MaskVal >= VWidth) && (MaskVal < 2 * VWidth))) - UsedElts.setBit(MaskVal - VWidth); + UnionUsedElts.setBit(MaskVal - VWidth); } - break; + return true; } default: break; } - return UsedElts; + + return false; } /// Find union of elements of V demanded by all its users. 
@@ -370,7 +370,8 @@ static APInt findDemandedEltsByAllUsers(Value *V) { APInt UnionUsedElts(VWidth, 0); for (const Use &U : V->uses()) { if (Instruction *I = dyn_cast<Instruction>(U.getUser())) { - UnionUsedElts |= findDemandedEltsBySingleUser(V, I); + if (!findDemandedEltsBySingleUser(V, I, UnionUsedElts)) + return APInt::getAllOnes(VWidth); } else { UnionUsedElts = APInt::getAllOnes(VWidth); break; diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp index ff063f9..917004c 100644 --- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -132,9 +132,11 @@ STATISTIC(NumReassoc , "Number of reassociations"); DEBUG_COUNTER(VisitCounter, "instcombine-visit", "Controls which instructions are visited"); -static cl::opt<bool> -EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), - cl::init(true)); +namespace llvm { + +static cl::opt<bool> EnableCodeSinking("instcombine-code-sinking", + cl::desc("Enable code sinking"), + cl::init(true)); static cl::opt<unsigned> MaxSinkNumUsers( "instcombine-max-sink-users", cl::init(32), @@ -156,6 +158,8 @@ extern cl::opt<bool> ProfcheckDisableMetadataFixes; static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true)); +} // end namespace llvm + std::optional<Instruction *> InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) { // Handle target specific intrinsics @@ -5212,7 +5216,7 @@ Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) { else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value()))) V = ConstantInt::getTrue(Ty); else if (match(U, m_c_Select(m_Specific(&I), m_Value(V)))) { - if (!isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT)) + if (V == &I || !isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT)) V = NullValue; } else if (auto *PHI = dyn_cast<PHINode>(U)) { if (Value *MaybeV = 
pickCommonConstantFromPHI(*PHI)) @@ -5225,6 +5229,7 @@ Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) { BestValue = NullValue; } assert(BestValue && "Must have at least one use"); + assert(BestValue != &I && "Cannot replace with itself"); return BestValue; }; diff --git a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp index f451c2b..cf87e35 100644 --- a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp +++ b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp @@ -55,11 +55,11 @@ using namespace llvm; STATISTIC(NumOfPGOICallPromotion, "Number of indirect call promotions."); STATISTIC(NumOfPGOICallsites, "Number of indirect call candidate sites."); +namespace llvm { extern cl::opt<unsigned> MaxNumVTableAnnotations; -namespace llvm { extern cl::opt<bool> EnableVTableProfileUse; -} +} // namespace llvm // Command line option to disable indirect-call promotion with the default as // false. This is for debug purpose. @@ -672,8 +672,8 @@ CallBase &llvm::pgo::promoteIndirectCall(CallBase &CB, Function *DirectCallee, createBranchWeights(CB.getContext(), Count, TotalCount - Count)); if (AttachProfToDirectCall) - setBranchWeights(NewInst, {static_cast<uint32_t>(Count)}, - /*IsExpected=*/false); + setFittedBranchWeights(NewInst, {Count}, + /*IsExpected=*/false); using namespace ore; diff --git a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp index d9e850e..120c4f6 100644 --- a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp +++ b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp @@ -222,7 +222,6 @@ cl::opt<bool> NoPGOWarnMismatchComdatWeak( cl::desc("The option is used to turn on/off " "warnings about hash mismatch for comdat " "or weak functions.")); -} // namespace llvm // Command line option to enable/disable select instruction instrumentation. 
static cl::opt<bool> @@ -347,7 +346,6 @@ cl::list<std::string> CtxPGOSkipCallsiteInstrument( extern cl::opt<unsigned> MaxNumVTableAnnotations; -namespace llvm { // Command line option to turn on CFG dot dump after profile annotation. // Defined in Analysis/BlockFrequencyInfo.cpp: -pgo-view-counts extern cl::opt<PGOViewCountsType> PGOViewCounts; diff --git a/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp b/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp index 343bec3..a5f417a 100644 --- a/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp +++ b/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp @@ -54,6 +54,8 @@ using namespace llvm; STATISTIC(NumOfPGOMemOPOpt, "Number of memop intrinsics optimized."); STATISTIC(NumOfPGOMemOPAnnotate, "Number of memop intrinsics annotated."); +namespace llvm { + // The minimum call count to optimize memory intrinsic calls. static cl::opt<unsigned> MemOPCountThreshold("pgo-memop-count-threshold", cl::Hidden, cl::init(1000), @@ -93,6 +95,8 @@ static cl::opt<unsigned> MemOpMaxOptSize("memop-value-prof-max-opt-size", cl::Hidden, cl::init(128), cl::desc("Optimize the memop size <= this value")); +} // end namespace llvm + namespace { static const char *getMIName(const MemIntrinsic *MI) { diff --git a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc index a3d4e53..0534fdd 100644 --- a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc +++ b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc @@ -21,7 +21,9 @@ using namespace llvm; using CandidateInfo = ValueProfileCollector::CandidateInfo; +namespace llvm { extern cl::opt<bool> MemOPOptMemcmpBcmp; +} // end namespace llvm ///--------------------------- MemIntrinsicPlugin ------------------------------ class MemIntrinsicPlugin : public InstVisitor<MemIntrinsicPlugin> { diff --git a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp 
b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp index 944b253..e9a3e98 100644 --- a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp +++ b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp @@ -190,12 +190,12 @@ void unfold(DomTreeUpdater *DTU, LoopInfo *LI, SelectInstToUnfold SIToUnfold, std::vector<BasicBlock *> *NewBBs) { SelectInst *SI = SIToUnfold.getInst(); PHINode *SIUse = SIToUnfold.getUse(); - BasicBlock *StartBlock = SI->getParent(); + assert(SI->hasOneUse()); + // The select may come indirectly, instead of from where it is defined. + BasicBlock *StartBlock = SIUse->getIncomingBlock(*SI->use_begin()); BranchInst *StartBlockTerm = dyn_cast<BranchInst>(StartBlock->getTerminator()); - assert(StartBlockTerm); - assert(SI->hasOneUse()); if (StartBlockTerm->isUnconditional()) { BasicBlock *EndBlock = StartBlock->getUniqueSuccessor(); @@ -332,7 +332,7 @@ void unfold(DomTreeUpdater *DTU, LoopInfo *LI, SelectInstToUnfold SIToUnfold, } // Preserve loop info - if (Loop *L = LI->getLoopFor(SI->getParent())) { + if (Loop *L = LI->getLoopFor(StartBlock)) { for (BasicBlock *NewBB : *NewBBs) L->addBasicBlockToLoop(NewBB, *LI); } @@ -533,6 +533,8 @@ private: return false; // Only fold the select coming from directly where it is defined. + // TODO: We have dealt with the select coming indirectly now. This + // constraint can be relaxed. 
PHINode *PHIUser = dyn_cast<PHINode>(SIUse); if (PHIUser && PHIUser->getIncomingBlock(*SI->use_begin()) != SIBB) return false; diff --git a/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp b/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp index 2025fbb..3c14036e 100644 --- a/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp +++ b/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp @@ -26,6 +26,8 @@ using namespace llvm; +namespace llvm { + static cl::opt<unsigned> JumpTableSizeThreshold("jump-table-to-switch-size-threshold", cl::Hidden, cl::desc("Only split jump tables with size less or " @@ -43,6 +45,8 @@ static cl::opt<unsigned> FunctionSizeThreshold( extern cl::opt<bool> ProfcheckDisableMetadataFixes; +} // end namespace llvm + #define DEBUG_TYPE "jump-table-to-switch" namespace { @@ -201,14 +205,12 @@ PreservedAnalyses JumpTableToSwitchPass::run(Function &F, PostDominatorTree *PDT = AM.getCachedResult<PostDominatorTreeAnalysis>(F); DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy); bool Changed = false; - InstrProfSymtab Symtab; - if (auto E = Symtab.create(*F.getParent())) - F.getContext().emitError( - "Could not create indirect call table, likely corrupted IR" + - toString(std::move(E))); - DenseMap<const Function *, GlobalValue::GUID> FToGuid; - for (const auto &[G, FPtr] : Symtab.getIDToNameMap()) - FToGuid.insert({FPtr, G}); + auto FuncToGuid = [&](const Function &Fct) { + if (Fct.getMetadata(AssignGUIDPass::GUIDMetadataName)) + return AssignGUIDPass::getGUID(Fct); + + return Function::getGUIDAssumingExternalLinkage(getIRPGOFuncName(F, InLTO)); + }; for (BasicBlock &BB : make_early_inc_range(F)) { BasicBlock *CurrentBB = &BB; @@ -230,12 +232,8 @@ PreservedAnalyses JumpTableToSwitchPass::run(Function &F, std::optional<JumpTableTy> JumpTable = parseJumpTable(GEP, PtrTy); if (!JumpTable) continue; - SplittedOutTail = expandToSwitch( - Call, *JumpTable, DTU, ORE, [&](const Function &Fct) { - if (Fct.getMetadata(AssignGUIDPass::GUIDMetadataName)) - 
return AssignGUIDPass::getGUID(Fct); - return FToGuid.lookup_or(&Fct, 0U); - }); + SplittedOutTail = + expandToSwitch(Call, *JumpTable, DTU, ORE, FuncToGuid); Changed = true; break; } diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp index bab1f2a..9655173 100644 --- a/llvm/lib/Transforms/Scalar/LICM.cpp +++ b/llvm/lib/Transforms/Scalar/LICM.cpp @@ -116,6 +116,8 @@ STATISTIC(NumIntAssociationsHoisted, STATISTIC(NumBOAssociationsHoisted, "Number of invariant BinaryOp expressions " "reassociated and hoisted out of the loop"); +namespace llvm { + /// Memory promotion is enabled by default. static cl::opt<bool> DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false), @@ -154,7 +156,7 @@ static cl::opt<unsigned> IntAssociationUpperLimit( // which may not be precise, since optimizeUses is capped. The result is // correct, but we may not get as "far up" as possible to get which access is // clobbering the one queried. -cl::opt<unsigned> llvm::SetLicmMssaOptCap( +cl::opt<unsigned> SetLicmMssaOptCap( "licm-mssa-optimization-cap", cl::init(100), cl::Hidden, cl::desc("Enable imprecision in LICM in pathological cases, in exchange " "for faster compile. Caps the MemorySSA clobbering calls.")); @@ -162,7 +164,7 @@ cl::opt<unsigned> llvm::SetLicmMssaOptCap( // Experimentally, memory promotion carries less importance than sinking and // hoisting. Limit when we do promotion when using MemorySSA, in order to save // compile time. -cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap( +cl::opt<unsigned> SetLicmMssaNoAccForPromotionCap( "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden, cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no " "effect. 
When MSSA in LICM is enabled, then this is the maximum " @@ -171,6 +173,8 @@ cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap( extern cl::opt<bool> ProfcheckDisableMetadataFixes; +} // end namespace llvm + static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI); static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp index 0874b29..019536ca 100644 --- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -1598,11 +1598,8 @@ bool LoopIdiomRecognize::optimizeCRCLoop(const PolynomialInfo &Info) { // crc = (crc << 8) ^ tbl[(iv'th byte of data) ^ (top byte of crc)] { auto LoByte = [](IRBuilderBase &Builder, Value *Op, const Twine &Name) { - Type *OpTy = Op->getType(); - unsigned OpBW = OpTy->getIntegerBitWidth(); - return OpBW > 8 - ? Builder.CreateAnd(Op, ConstantInt::get(OpTy, 0XFF), Name) - : Op; + return Builder.CreateZExtOrTrunc( + Op, IntegerType::getInt8Ty(Op->getContext()), Name); }; auto HiIdx = [LoByte, CRCBW](IRBuilderBase &Builder, Value *Op, const Twine &Name) { diff --git a/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp b/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp index 1a9e16b..d31154f 100644 --- a/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp +++ b/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp @@ -17,6 +17,8 @@ using namespace llvm; +namespace llvm { + /// Uses the "source_filename" instead of a Module hash ID for the suffix of /// promoted locals during LTO. NOTE: This requires that the source filename /// has a unique name / path to avoid name collisions. 
@@ -35,6 +37,8 @@ cl::list<GlobalValue::GUID> MoveSymbolGUID( "used with the name of contextual profiling roots."), cl::Hidden); +} // end namespace llvm + FunctionImportGlobalProcessing::FunctionImportGlobalProcessing( Module &M, const ModuleSummaryIndex &Index, SetVector<GlobalValue *> *GlobalsToImport, bool ClearDSOLocalOnDeclarations) diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp index 216bdf4..8bba634 100644 --- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp @@ -95,7 +95,9 @@ using namespace PatternMatch; #define DEBUG_TYPE "simplifycfg" -cl::opt<bool> llvm::RequireAndPreserveDomTree( +namespace llvm { + +cl::opt<bool> RequireAndPreserveDomTree( "simplifycfg-require-and-preserve-domtree", cl::Hidden, cl::desc( @@ -205,6 +207,8 @@ static cl::opt<unsigned> MaxJumpThreadingLiveBlocks( extern cl::opt<bool> ProfcheckDisableMetadataFixes; +} // end namespace llvm + STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps"); STATISTIC(NumLinearMaps, "Number of switch instructions turned into linear mapping"); @@ -955,33 +959,6 @@ static bool valuesOverlap(std::vector<ValueEqualityComparisonCase> &C1, return false; } -// Set branch weights on SwitchInst. This sets the metadata if there is at -// least one non-zero weight. -static void setBranchWeights(SwitchInst *SI, ArrayRef<uint32_t> Weights, - bool IsExpected) { - // Check that there is at least one non-zero weight. Otherwise, pass - // nullptr to setMetadata which will erase the existing metadata. - MDNode *N = nullptr; - if (llvm::any_of(Weights, [](uint32_t W) { return W != 0; })) - N = MDBuilder(SI->getParent()->getContext()) - .createBranchWeights(Weights, IsExpected); - SI->setMetadata(LLVMContext::MD_prof, N); -} - -// Similar to the above, but for branch and select instructions that take -// exactly 2 weights. 
-static void setBranchWeights(Instruction *I, uint32_t TrueWeight, - uint32_t FalseWeight, bool IsExpected) { - assert(isa<BranchInst>(I) || isa<SelectInst>(I)); - // Check that there is at least one non-zero weight. Otherwise, pass - // nullptr to setMetadata which will erase the existing metadata. - MDNode *N = nullptr; - if (TrueWeight || FalseWeight) - N = MDBuilder(I->getParent()->getContext()) - .createBranchWeights(TrueWeight, FalseWeight, IsExpected); - I->setMetadata(LLVMContext::MD_prof, N); -} - /// If TI is known to be a terminator instruction and its block is known to /// only have a single predecessor block, check to see if that predecessor is /// also a value comparison with the same value, and if that comparison @@ -1181,16 +1158,6 @@ static void getBranchWeights(Instruction *TI, } } -/// Keep halving the weights until all can fit in uint32_t. -static void fitWeights(MutableArrayRef<uint64_t> Weights) { - uint64_t Max = *llvm::max_element(Weights); - if (Max > UINT_MAX) { - unsigned Offset = 32 - llvm::countl_zero(Max); - for (uint64_t &I : Weights) - I >>= Offset; - } -} - static void cloneInstructionsIntoPredecessorBlockAndUpdateSSAUses( BasicBlock *BB, BasicBlock *PredBlock, ValueToValueMapTy &VMap) { Instruction *PTI = PredBlock->getTerminator(); @@ -1446,14 +1413,9 @@ bool SimplifyCFGOpt::performValueComparisonIntoPredecessorFolding( for (ValueEqualityComparisonCase &V : PredCases) NewSI->addCase(V.Value, V.Dest); - if (PredHasWeights || SuccHasWeights) { - // Halve the weights if any of them cannot fit in an uint32_t - fitWeights(Weights); - - SmallVector<uint32_t, 8> MDWeights(Weights.begin(), Weights.end()); - - setBranchWeights(NewSI, MDWeights, /*IsExpected=*/false); - } + if (PredHasWeights || SuccHasWeights) + setFittedBranchWeights(*NewSI, Weights, /*IsExpected=*/false, + /*ElideAllZero=*/true); eraseTerminatorAndDCECond(PTI); @@ -4053,39 +4015,34 @@ static bool performBranchToCommonDestFolding(BranchInst *BI, BranchInst *PBI, // Try to 
update branch weights. uint64_t PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight; - SmallVector<uint32_t, 2> MDWeights; + SmallVector<uint64_t, 2> MDWeights; if (extractPredSuccWeights(PBI, BI, PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight)) { - SmallVector<uint64_t, 8> NewWeights; if (PBI->getSuccessor(0) == BB) { // PBI: br i1 %x, BB, FalseDest // BI: br i1 %y, UniqueSucc, FalseDest // TrueWeight is TrueWeight for PBI * TrueWeight for BI. - NewWeights.push_back(PredTrueWeight * SuccTrueWeight); + MDWeights.push_back(PredTrueWeight * SuccTrueWeight); // FalseWeight is FalseWeight for PBI * TotalWeight for BI + // TrueWeight for PBI * FalseWeight for BI. // We assume that total weights of a BranchInst can fit into 32 bits. // Therefore, we will not have overflow using 64-bit arithmetic. - NewWeights.push_back(PredFalseWeight * - (SuccFalseWeight + SuccTrueWeight) + - PredTrueWeight * SuccFalseWeight); + MDWeights.push_back(PredFalseWeight * (SuccFalseWeight + SuccTrueWeight) + + PredTrueWeight * SuccFalseWeight); } else { // PBI: br i1 %x, TrueDest, BB // BI: br i1 %y, TrueDest, UniqueSucc // TrueWeight is TrueWeight for PBI * TotalWeight for BI + // FalseWeight for PBI * TrueWeight for BI. - NewWeights.push_back(PredTrueWeight * (SuccFalseWeight + SuccTrueWeight) + - PredFalseWeight * SuccTrueWeight); + MDWeights.push_back(PredTrueWeight * (SuccFalseWeight + SuccTrueWeight) + + PredFalseWeight * SuccTrueWeight); // FalseWeight is FalseWeight for PBI * FalseWeight for BI. 
- NewWeights.push_back(PredFalseWeight * SuccFalseWeight); + MDWeights.push_back(PredFalseWeight * SuccFalseWeight); } - // Halve the weights if any of them cannot fit in an uint32_t - fitWeights(NewWeights); - - append_range(MDWeights, NewWeights); - setBranchWeights(PBI, MDWeights[0], MDWeights[1], /*IsExpected=*/false); + setFittedBranchWeights(*PBI, MDWeights, /*IsExpected=*/false, + /*ElideAllZero=*/true); // TODO: If BB is reachable from all paths through PredBlock, then we // could replace PBI's branch probabilities with BI's. @@ -4125,8 +4082,8 @@ static bool performBranchToCommonDestFolding(BranchInst *BI, BranchInst *PBI, if (auto *SI = dyn_cast<SelectInst>(PBI->getCondition())) if (!MDWeights.empty()) { assert(isSelectInRoleOfConjunctionOrDisjunction(SI)); - setBranchWeights(SI, MDWeights[0], MDWeights[1], - /*IsExpected=*/false); + setFittedBranchWeights(*SI, {MDWeights[0], MDWeights[1]}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } ++NumFoldBranchToCommonDest; @@ -4478,9 +4435,9 @@ static bool mergeConditionalStoreToAddress( if (InvertQCond) std::swap(QWeights[0], QWeights[1]); auto CombinedWeights = getDisjunctionWeights(PWeights, QWeights); - setBranchWeights(PostBB->getTerminator(), CombinedWeights[0], - CombinedWeights[1], - /*IsExpected=*/false); + setFittedBranchWeights(*PostBB->getTerminator(), + {CombinedWeights[0], CombinedWeights[1]}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } QB.SetInsertPoint(T); @@ -4836,10 +4793,9 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI, uint64_t NewWeights[2] = {PredCommon * (SuccCommon + SuccOther) + PredOther * SuccCommon, PredOther * SuccOther}; - // Halve the weights if any of them cannot fit in an uint32_t - fitWeights(NewWeights); - setBranchWeights(PBI, NewWeights[0], NewWeights[1], /*IsExpected=*/false); + setFittedBranchWeights(*PBI, NewWeights, /*IsExpected=*/false, + /*ElideAllZero=*/true); // Cond may be a select instruction with the first operand set to 
"true", or // the second to "false" (see how createLogicalOp works for `and` and `or`) if (!ProfcheckDisableMetadataFixes) @@ -4849,8 +4805,8 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI, assert(dyn_cast<SelectInst>(SI)->getCondition() == PBICond); // The corresponding probabilities are what was referred to above as // PredCommon and PredOther. - setBranchWeights(SI, PredCommon, PredOther, - /*IsExpected=*/false); + setFittedBranchWeights(*SI, {PredCommon, PredOther}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } } @@ -4876,8 +4832,8 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI, if (HasWeights) { uint64_t TrueWeight = PBIOp ? PredFalseWeight : PredTrueWeight; uint64_t FalseWeight = PBIOp ? PredTrueWeight : PredFalseWeight; - setBranchWeights(NV, TrueWeight, FalseWeight, - /*IsExpected=*/false); + setFittedBranchWeights(*NV, {TrueWeight, FalseWeight}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } } } @@ -4940,7 +4896,8 @@ bool SimplifyCFGOpt::simplifyTerminatorOnSelect(Instruction *OldTerm, // Create a conditional branch sharing the condition of the select. BranchInst *NewBI = Builder.CreateCondBr(Cond, TrueBB, FalseBB); if (TrueWeight != FalseWeight) - setBranchWeights(NewBI, TrueWeight, FalseWeight, /*IsExpected=*/false); + setBranchWeights(*NewBI, {TrueWeight, FalseWeight}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } } else if (KeepEdge1 && (KeepEdge2 || TrueBB == FalseBB)) { // Neither of the selected blocks were successors, so this @@ -5889,7 +5846,8 @@ bool SimplifyCFGOpt::turnSwitchRangeIntoICmp(SwitchInst *SI, TrueWeight /= 2; FalseWeight /= 2; } - setBranchWeights(NewBI, TrueWeight, FalseWeight, /*IsExpected=*/false); + setFittedBranchWeights(*NewBI, {TrueWeight, FalseWeight}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } } @@ -6364,9 +6322,9 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, // BranchWeights. 
We want the probability and negative probability of // Condition == SecondCase. assert(BranchWeights.size() == 3); - setBranchWeights(SI, BranchWeights[2], - BranchWeights[0] + BranchWeights[1], - /*IsExpected=*/false); + setBranchWeights( + *SI, {BranchWeights[2], BranchWeights[0] + BranchWeights[1]}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } } Value *ValueCompare = @@ -6381,9 +6339,10 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, size_t FirstCasePos = (Condition != nullptr); size_t SecondCasePos = FirstCasePos + 1; uint32_t DefaultCase = (Condition != nullptr) ? BranchWeights[0] : 0; - setBranchWeights(SI, BranchWeights[FirstCasePos], - DefaultCase + BranchWeights[SecondCasePos], - /*IsExpected=*/false); + setBranchWeights(*SI, + {BranchWeights[FirstCasePos], + DefaultCase + BranchWeights[SecondCasePos]}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } return Ret; } @@ -6427,8 +6386,10 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, // We know there's a Default case. We base the resulting branch // weights off its probability. 
assert(BranchWeights.size() >= 2); - setBranchWeights(SI, accumulate(drop_begin(BranchWeights), 0), - BranchWeights[0], /*IsExpected=*/false); + setBranchWeights( + *SI, + {accumulate(drop_begin(BranchWeights), 0U), BranchWeights[0]}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } return Ret; } @@ -6451,8 +6412,10 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult); if (auto *SI = dyn_cast<SelectInst>(Ret); SI && HasBranchWeights) { assert(BranchWeights.size() >= 2); - setBranchWeights(SI, accumulate(drop_begin(BranchWeights), 0), - BranchWeights[0], /*IsExpected=*/false); + setBranchWeights( + *SI, + {accumulate(drop_begin(BranchWeights), 0U), BranchWeights[0]}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } return Ret; } @@ -6469,8 +6432,9 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult); if (auto *SI = dyn_cast<SelectInst>(Ret); SI && HasBranchWeights) { assert(BranchWeights.size() >= 2); - setBranchWeights(SI, accumulate(drop_begin(BranchWeights), 0), - BranchWeights[0], /*IsExpected=*/false); + setBranchWeights( + *SI, {accumulate(drop_begin(BranchWeights), 0U), BranchWeights[0]}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } return Ret; } @@ -8152,8 +8116,8 @@ static bool mergeNestedCondBranch(BranchInst *BI, DomTreeUpdater *DTU) { if (HasWeight) { uint64_t Weights[2] = {BBTWeight * BB1FWeight + BBFWeight * BB2TWeight, BBTWeight * BB1TWeight + BBFWeight * BB2FWeight}; - fitWeights(Weights); - setBranchWeights(BI, Weights[0], Weights[1], /*IsExpected=*/false); + setFittedBranchWeights(*BI, Weights, /*IsExpected=*/false, + /*ElideAllZero=*/true); } return true; } diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp index ff35db1..7d376c3 100644 --- 
a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp @@ -293,9 +293,8 @@ void LoopVectorizeHints::getHintsFromMetadata() { } void LoopVectorizeHints::setHint(StringRef Name, Metadata *Arg) { - if (!Name.starts_with(Prefix())) + if (!Name.consume_front(Prefix())) return; - Name = Name.substr(Prefix().size(), StringRef::npos); const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg); if (!C) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 12fb46d..e5d6c81 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -5699,6 +5699,20 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { Worklist.push_back(InstOp); } + auto UpdateMemOpUserCost = [this, VF](LoadInst *LI) { + // If there are direct memory op users of the newly scalarized load, + // their cost may have changed because there's no scalarization + // overhead for the operand. Update it. + for (User *U : LI->users()) { + if (!isa<LoadInst, StoreInst>(U)) + continue; + if (getWideningDecision(cast<Instruction>(U), VF) != CM_Scalarize) + continue; + setWideningDecision( + cast<Instruction>(U), VF, CM_Scalarize, + getMemInstScalarizationCost(cast<Instruction>(U), VF)); + } + }; for (auto *I : AddrDefs) { if (isa<LoadInst>(I)) { // Setting the desired widening decision should ideally be handled in @@ -5708,21 +5722,24 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { InstWidening Decision = getWideningDecision(I, VF); if (Decision == CM_Widen || Decision == CM_Widen_Reverse || (!isPredicatedInst(I) && !Legal->isUniformMemOp(*I, VF) && - Decision == CM_Scalarize)) + Decision == CM_Scalarize)) { // Scalarize a widened load of address or update the cost of a scalar // load of an address. 
setWideningDecision( I, VF, CM_Scalarize, (VF.getKnownMinValue() * getMemoryInstructionCost(I, ElementCount::getFixed(1)))); - else if (const auto *Group = getInterleavedAccessGroup(I)) { + UpdateMemOpUserCost(cast<LoadInst>(I)); + } else if (const auto *Group = getInterleavedAccessGroup(I)) { // Scalarize an interleave group of address loads. for (unsigned I = 0; I < Group->getFactor(); ++I) { - if (Instruction *Member = Group->getMember(I)) + if (Instruction *Member = Group->getMember(I)) { setWideningDecision( Member, VF, CM_Scalarize, (VF.getKnownMinValue() * getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); + UpdateMemOpUserCost(cast<LoadInst>(Member)); + } } } } else { @@ -9521,55 +9538,52 @@ static SmallVector<Instruction *> preparePlanForEpilogueVectorLoop( VPBasicBlock *Header = VectorLoop->getEntryBasicBlock(); Header->setName("vec.epilog.vector.body"); - DenseMap<Value *, Value *> ToFrozen; - SmallVector<Instruction *> InstsToMove; // Ensure that the start values for all header phi recipes are updated before // vectorizing the epilogue loop. - for (VPRecipeBase &R : Header->phis()) { - if (auto *IV = dyn_cast<VPCanonicalIVPHIRecipe>(&R)) { - // When vectorizing the epilogue loop, the canonical induction start - // value needs to be changed from zero to the value after the main - // vector loop. Find the resume value created during execution of the main - // VPlan. It must be the first phi in the loop preheader. - // FIXME: Improve modeling for canonical IV start values in the epilogue - // loop. 
- using namespace llvm::PatternMatch; - PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin(); - for (Value *Inc : EPResumeVal->incoming_values()) { - if (match(Inc, m_SpecificInt(0))) - continue; - assert(!EPI.VectorTripCount && - "Must only have a single non-zero incoming value"); - EPI.VectorTripCount = Inc; - } - // If we didn't find a non-zero vector trip count, all incoming values - // must be zero, which also means the vector trip count is zero. Pick the - // first zero as vector trip count. - // TODO: We should not choose VF * UF so the main vector loop is known to - // be dead. - if (!EPI.VectorTripCount) { - assert( - EPResumeVal->getNumIncomingValues() > 0 && - all_of(EPResumeVal->incoming_values(), - [](Value *Inc) { return match(Inc, m_SpecificInt(0)); }) && - "all incoming values must be 0"); - EPI.VectorTripCount = EPResumeVal->getOperand(0); - } - VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal); - assert(all_of(IV->users(), - [](const VPUser *U) { - return isa<VPScalarIVStepsRecipe>(U) || - isa<VPDerivedIVRecipe>(U) || - cast<VPRecipeBase>(U)->isScalarCast() || - cast<VPInstruction>(U)->getOpcode() == - Instruction::Add; - }) && - "the canonical IV should only be used by its increment or " - "ScalarIVSteps when resetting the start value"); - IV->setOperand(0, VPV); + VPCanonicalIVPHIRecipe *IV = Plan.getCanonicalIV(); + // When vectorizing the epilogue loop, the canonical induction start + // value needs to be changed from zero to the value after the main + // vector loop. Find the resume value created during execution of the main + // VPlan. It must be the first phi in the loop preheader. + // FIXME: Improve modeling for canonical IV start values in the epilogue + // loop. 
+ using namespace llvm::PatternMatch; + PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin(); + for (Value *Inc : EPResumeVal->incoming_values()) { + if (match(Inc, m_SpecificInt(0))) continue; - } + assert(!EPI.VectorTripCount && + "Must only have a single non-zero incoming value"); + EPI.VectorTripCount = Inc; + } + // If we didn't find a non-zero vector trip count, all incoming values + // must be zero, which also means the vector trip count is zero. Pick the + // first zero as vector trip count. + // TODO: We should not choose VF * UF so the main vector loop is known to + // be dead. + if (!EPI.VectorTripCount) { + assert(EPResumeVal->getNumIncomingValues() > 0 && + all_of(EPResumeVal->incoming_values(), + [](Value *Inc) { return match(Inc, m_SpecificInt(0)); }) && + "all incoming values must be 0"); + EPI.VectorTripCount = EPResumeVal->getOperand(0); + } + VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal); + assert(all_of(IV->users(), + [](const VPUser *U) { + return isa<VPScalarIVStepsRecipe>(U) || + isa<VPDerivedIVRecipe>(U) || + cast<VPRecipeBase>(U)->isScalarCast() || + cast<VPInstruction>(U)->getOpcode() == + Instruction::Add; + }) && + "the canonical IV should only be used by its increment or " + "ScalarIVSteps when resetting the start value"); + IV->setOperand(0, VPV); + DenseMap<Value *, Value *> ToFrozen; + SmallVector<Instruction *> InstsToMove; + for (VPRecipeBase &R : drop_begin(Header->phis())) { Value *ResumeV = nullptr; // TODO: Move setting of resume values to prepareToExecute. if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) { diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index f77d587..fedca65 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -2241,10 +2241,9 @@ public: /// TODO: If load combining is allowed in the IR optimizer, this analysis /// may not be necessary. 
bool isLoadCombineCandidate(ArrayRef<Value *> Stores) const; - bool isStridedLoad(ArrayRef<Value *> VL, ArrayRef<Value *> PointerOps, - ArrayRef<unsigned> Order, const TargetTransformInfo &TTI, - const DataLayout &DL, ScalarEvolution &SE, - const int64_t Diff, StridedPtrInfo &SPtrInfo) const; + bool isStridedLoad(ArrayRef<Value *> PointerOps, Type *ScalarTy, + Align Alignment, const int64_t Diff, Value *Ptr0, + Value *PtrN, StridedPtrInfo &SPtrInfo) const; /// Checks if the given array of loads can be represented as a vectorized, /// scatter or just simple gather. @@ -6824,13 +6823,10 @@ isMaskedLoadCompress(ArrayRef<Value *> VL, ArrayRef<Value *> PointerOps, /// 4. Any pointer operand is an instruction with the users outside of the /// current graph (for masked gathers extra extractelement instructions /// might be required). -bool BoUpSLP::isStridedLoad(ArrayRef<Value *> VL, ArrayRef<Value *> PointerOps, - ArrayRef<unsigned> Order, - const TargetTransformInfo &TTI, - const DataLayout &DL, ScalarEvolution &SE, - const int64_t Diff, - StridedPtrInfo &SPtrInfo) const { - const size_t Sz = VL.size(); +bool BoUpSLP::isStridedLoad(ArrayRef<Value *> PointerOps, Type *ScalarTy, + Align Alignment, const int64_t Diff, Value *Ptr0, + Value *PtrN, StridedPtrInfo &SPtrInfo) const { + const size_t Sz = PointerOps.size(); if (Diff % (Sz - 1) != 0) return false; @@ -6842,7 +6838,6 @@ bool BoUpSLP::isStridedLoad(ArrayRef<Value *> VL, ArrayRef<Value *> PointerOps, }); const uint64_t AbsoluteDiff = std::abs(Diff); - Type *ScalarTy = VL.front()->getType(); auto *VecTy = getWidenedType(ScalarTy, Sz); if (IsAnyPointerUsedOutGraph || (AbsoluteDiff > Sz && @@ -6853,20 +6848,9 @@ bool BoUpSLP::isStridedLoad(ArrayRef<Value *> VL, ArrayRef<Value *> PointerOps, int64_t Stride = Diff / static_cast<int64_t>(Sz - 1); if (Diff != Stride * static_cast<int64_t>(Sz - 1)) return false; - Align Alignment = - cast<LoadInst>(Order.empty() ? 
VL.front() : VL[Order.front()]) - ->getAlign(); - if (!TTI.isLegalStridedLoadStore(VecTy, Alignment)) + if (!TTI->isLegalStridedLoadStore(VecTy, Alignment)) return false; - Value *Ptr0; - Value *PtrN; - if (Order.empty()) { - Ptr0 = PointerOps.front(); - PtrN = PointerOps.back(); - } else { - Ptr0 = PointerOps[Order.front()]; - PtrN = PointerOps[Order.back()]; - } + // Iterate through all pointers and check if all distances are // unique multiple of Dist. SmallSet<int64_t, 4> Dists; @@ -6875,14 +6859,14 @@ bool BoUpSLP::isStridedLoad(ArrayRef<Value *> VL, ArrayRef<Value *> PointerOps, if (Ptr == PtrN) Dist = Diff; else if (Ptr != Ptr0) - Dist = *getPointersDiff(ScalarTy, Ptr0, ScalarTy, Ptr, DL, SE); + Dist = *getPointersDiff(ScalarTy, Ptr0, ScalarTy, Ptr, *DL, *SE); // If the strides are not the same or repeated, we can't // vectorize. if (((Dist / Stride) * Stride) != Dist || !Dists.insert(Dist).second) break; } if (Dists.size() == Sz) { - Type *StrideTy = DL.getIndexType(Ptr0->getType()); + Type *StrideTy = DL->getIndexType(Ptr0->getType()); SPtrInfo.StrideVal = ConstantInt::get(StrideTy, Stride); SPtrInfo.Ty = getWidenedType(ScalarTy, Sz); return true; @@ -6971,7 +6955,11 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads( cast<Instruction>(V), UserIgnoreList); })) return LoadsState::CompressVectorize; - if (isStridedLoad(VL, PointerOps, Order, *TTI, *DL, *SE, *Diff, SPtrInfo)) + Align Alignment = + cast<LoadInst>(Order.empty() ? 
VL.front() : VL[Order.front()]) + ->getAlign(); + if (isStridedLoad(PointerOps, ScalarTy, Alignment, *Diff, Ptr0, PtrN, + SPtrInfo)) return LoadsState::StridedVectorize; } if (!TTI->isLegalMaskedGather(VecTy, CommonAlignment) || diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp index 81f1956..02eb637 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp @@ -968,24 +968,36 @@ void VPlan::execute(VPTransformState *State) { // logic generic during VPlan execution. State->CFG.DTU.applyUpdates( {{DominatorTree::Delete, ScalarPh, ScalarPh->getSingleSuccessor()}}); - } else { + } + ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT( + Entry); + // Generate code for the VPlan, in parts of the vector skeleton, loop body and + // successor blocks including the middle, exit and scalar preheader blocks. + for (VPBlockBase *Block : RPOT) + Block->execute(State); + + // If the original loop is unreachable, delete it and all its blocks. + if (!ScalarPhVPBB->hasPredecessors()) { + // DeleteDeadBlocks will remove single-entry phis. Remove them from the exit + // VPIRBBs in VPlan as well, otherwise we would retain references to deleted + // IR instructions. + for (VPIRBasicBlock *EB : getExitBlocks()) { + for (VPRecipeBase &R : make_early_inc_range(EB->phis())) { + if (R.getNumOperands() == 1) + R.eraseFromParent(); + } + } + Loop *OrigLoop = State->LI->getLoopFor(getScalarHeader()->getIRBasicBlock()); - // If the original loop is unreachable, we need to delete it. 
auto Blocks = OrigLoop->getBlocksVector(); Blocks.push_back(cast<VPIRBasicBlock>(ScalarPhVPBB)->getIRBasicBlock()); for (auto *BB : Blocks) State->LI->removeBlock(BB); + DeleteDeadBlocks(Blocks, &State->CFG.DTU); State->LI->erase(OrigLoop); } - ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT( - Entry); - // Generate code for the VPlan, in parts of the vector skeleton, loop body and - // successor blocks including the middle, exit and scalar preheader blocks. - for (VPBlockBase *Block : RPOT) - Block->execute(State); - State->CFG.DTU.flush(); VPBasicBlock *Header = vputils::getFirstLoopHeader(*this, State->VPDT); diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 10d704d..c167dd7 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -29,6 +29,7 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Twine.h" #include "llvm/ADT/ilist.h" @@ -2977,7 +2978,8 @@ public: /// the expression is elevated to connect the non-expression recipe with the /// VPExpressionRecipe itself. class VPExpressionRecipe : public VPSingleDefRecipe { - /// Recipes included in this VPExpressionRecipe. + /// Recipes included in this VPExpressionRecipe. This could contain + /// duplicates. SmallVector<VPSingleDefRecipe *> ExpressionRecipes; /// Temporary VPValues used for external operands of the expression, i.e. 
@@ -3039,8 +3041,11 @@ public: } ~VPExpressionRecipe() override { - for (auto *R : reverse(ExpressionRecipes)) - delete R; + SmallPtrSet<VPSingleDefRecipe *, 4> ExpressionRecipesSeen; + for (auto *R : reverse(ExpressionRecipes)) { + if (ExpressionRecipesSeen.insert(R).second) + delete R; + } for (VPValue *T : LiveInPlaceholders) delete T; } diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index 3a55710..46909a5 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -2755,10 +2755,7 @@ VPExpressionRecipe::VPExpressionRecipe( ExpressionTypes ExpressionType, ArrayRef<VPSingleDefRecipe *> ExpressionRecipes) : VPSingleDefRecipe(VPDef::VPExpressionSC, {}, {}), - ExpressionRecipes(SetVector<VPSingleDefRecipe *>( - ExpressionRecipes.begin(), ExpressionRecipes.end()) - .takeVector()), - ExpressionType(ExpressionType) { + ExpressionRecipes(ExpressionRecipes), ExpressionType(ExpressionType) { assert(!ExpressionRecipes.empty() && "Nothing to combine?"); assert( none_of(ExpressionRecipes, @@ -2802,14 +2799,22 @@ VPExpressionRecipe::VPExpressionRecipe( continue; addOperand(Op); LiveInPlaceholders.push_back(new VPValue()); - R->setOperand(Idx, LiveInPlaceholders.back()); } } + + // Replace each external operand with the first one created for it in + // LiveInPlaceholders. + for (auto *R : ExpressionRecipes) + for (auto const &[LiveIn, Tmp] : zip(operands(), LiveInPlaceholders)) + R->replaceUsesOfWith(LiveIn, Tmp); } void VPExpressionRecipe::decompose() { for (auto *R : ExpressionRecipes) - R->insertBefore(this); + // Since the list could contain duplicates, make sure the recipe hasn't + // already been inserted. 
+ if (!R->getParent()) + R->insertBefore(this); for (const auto &[Idx, Op] : enumerate(operands())) LiveInPlaceholders[Idx]->replaceAllUsesWith(Op); diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index a73b083..acdb379 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -40,7 +40,7 @@ using namespace llvm; using namespace VPlanPatternMatch; -cl::opt<bool> EnableWideActiveLaneMask( +static cl::opt<bool> EnableWideActiveLaneMask( "enable-wide-lane-mask", cl::init(false), cl::Hidden, cl::desc("Enable use of wide get active lane mask instructions")); diff --git a/llvm/runtimes/CMakeLists.txt b/llvm/runtimes/CMakeLists.txt index 8399292..6f98eae 100644 --- a/llvm/runtimes/CMakeLists.txt +++ b/llvm/runtimes/CMakeLists.txt @@ -507,10 +507,14 @@ if(build_runtimes) endif() # Forward user-provived system configuration to runtimes for requirement introspection. - # CMAKE_PREFIX_PATH is the search path for CMake packages. + # CMAKE_PREFIX_PATH is the search path for CMake packages. In order to pass through + # the command line interface, the CMake semicolon separator needs to be replaced + # with $<SEMICOLON> if(CMAKE_PREFIX_PATH) - list(APPEND extra_cmake_args "-DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH}") + string(JOIN "$<SEMICOLON>" escaped_cmake_prefix_path ${CMAKE_PREFIX_PATH}) + list(APPEND extra_cmake_args "-DCMAKE_PREFIX_PATH=${escaped_cmake_prefix_path}") endif() + # CMAKE_PROGRAM_PATH is the search path for executables such as python. 
if(CMAKE_PROGRAM_PATH) list(APPEND extra_cmake_args "-DCMAKE_PROGRAM_PATH=${CMAKE_PROGRAM_PATH}") diff --git a/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json b/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json index 07fde84..ae36ff5 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json +++ b/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json @@ -87,6 +87,32 @@ "Function": [1, 2], "Pointer": [3, 4], "Constant": [5, 6], - "Variable": [7, 8] + "Variable": [7, 8], + "FCMP_false": [9, 10], + "FCMP_oeq": [11, 12], + "FCMP_ogt": [13, 14], + "FCMP_oge": [15, 16], + "FCMP_olt": [17, 18], + "FCMP_ole": [19, 20], + "FCMP_one": [21, 22], + "FCMP_ord": [23, 24], + "FCMP_uno": [25, 26], + "FCMP_ueq": [27, 28], + "FCMP_ugt": [29, 30], + "FCMP_uge": [31, 32], + "FCMP_ult": [33, 34], + "FCMP_ule": [35, 36], + "FCMP_une": [37, 38], + "FCMP_true": [39, 40], + "ICMP_eq": [41, 42], + "ICMP_ne": [43, 44], + "ICMP_ugt": [45, 46], + "ICMP_uge": [47, 48], + "ICMP_ult": [49, 50], + "ICMP_ule": [51, 52], + "ICMP_sgt": [53, 54], + "ICMP_sge": [55, 56], + "ICMP_slt": [57, 58], + "ICMP_sle": [59, 60] } } diff --git a/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_arg_vocab.json b/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_arg_vocab.json index 932b3a2..9003dc7 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_arg_vocab.json +++ b/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_arg_vocab.json @@ -86,6 +86,32 @@ "Function": [1, 2, 3], "Pointer": [4, 5, 6], "Constant": [7, 8, 9], - "Variable": [10, 11, 12] + "Variable": [10, 11, 12], + "FCMP_false": [13, 14, 15], + "FCMP_oeq": [16, 17, 18], + "FCMP_ogt": [19, 20, 21], + "FCMP_oge": [22, 23, 24], + "FCMP_olt": [25, 26, 27], + "FCMP_ole": [28, 29, 30], + "FCMP_one": [31, 32, 33], + "FCMP_ord": [34, 35, 36], + "FCMP_uno": [37, 38, 39], + "FCMP_ueq": [40, 41, 42], + "FCMP_ugt": [43, 44, 45], + "FCMP_uge": [46, 47, 48], + "FCMP_ult": [49, 50, 51], + "FCMP_ule": [52, 53, 54], + "FCMP_une": [55, 56, 57], + 
"FCMP_true": [58, 59, 60], + "ICMP_eq": [61, 62, 63], + "ICMP_ne": [64, 65, 66], + "ICMP_ugt": [67, 68, 69], + "ICMP_uge": [70, 71, 72], + "ICMP_ult": [73, 74, 75], + "ICMP_ule": [76, 77, 78], + "ICMP_sgt": [79, 80, 81], + "ICMP_sge": [82, 83, 84], + "ICMP_slt": [85, 86, 87], + "ICMP_sle": [88, 89, 90] } } diff --git a/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_opc_vocab.json b/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_opc_vocab.json index 19f3efe..7ef8549 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_opc_vocab.json +++ b/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_opc_vocab.json @@ -47,6 +47,7 @@ "FPTrunc": [133, 134, 135], "FPExt": [136, 137, 138], "PtrToInt": [139, 140, 141], + "PtrToAddr": [202, 203, 204], "IntToPtr": [142, 143, 144], "BitCast": [145, 146, 147], "AddrSpaceCast": [148, 149, 150], @@ -86,6 +87,32 @@ "Function": [0, 0, 0], "Pointer": [0, 0, 0], "Constant": [0, 0, 0], - "Variable": [0, 0, 0] + "Variable": [0, 0, 0], + "FCMP_false": [0, 0, 0], + "FCMP_oeq": [0, 0, 0], + "FCMP_ogt": [0, 0, 0], + "FCMP_oge": [0, 0, 0], + "FCMP_olt": [0, 0, 0], + "FCMP_ole": [0, 0, 0], + "FCMP_one": [0, 0, 0], + "FCMP_ord": [0, 0, 0], + "FCMP_uno": [0, 0, 0], + "FCMP_ueq": [0, 0, 0], + "FCMP_ugt": [0, 0, 0], + "FCMP_uge": [0, 0, 0], + "FCMP_ult": [0, 0, 0], + "FCMP_ule": [0, 0, 0], + "FCMP_une": [0, 0, 0], + "FCMP_true": [0, 0, 0], + "ICMP_eq": [0, 0, 0], + "ICMP_ne": [0, 0, 0], + "ICMP_ugt": [0, 0, 0], + "ICMP_uge": [0, 0, 0], + "ICMP_ult": [0, 0, 0], + "ICMP_ule": [0, 0, 0], + "ICMP_sgt": [1, 1, 1], + "ICMP_sge": [0, 0, 0], + "ICMP_slt": [0, 0, 0], + "ICMP_sle": [0, 0, 0] } } diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt index df7769c..d62b0dd 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt +++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt @@ -82,3 +82,29 @@ Key: Function: [ 
0.20 0.40 ] Key: Pointer: [ 0.60 0.80 ] Key: Constant: [ 1.00 1.20 ] Key: Variable: [ 1.40 1.60 ] +Key: FCMP_false: [ 1.80 2.00 ] +Key: FCMP_oeq: [ 2.20 2.40 ] +Key: FCMP_ogt: [ 2.60 2.80 ] +Key: FCMP_oge: [ 3.00 3.20 ] +Key: FCMP_olt: [ 3.40 3.60 ] +Key: FCMP_ole: [ 3.80 4.00 ] +Key: FCMP_one: [ 4.20 4.40 ] +Key: FCMP_ord: [ 4.60 4.80 ] +Key: FCMP_uno: [ 5.00 5.20 ] +Key: FCMP_ueq: [ 5.40 5.60 ] +Key: FCMP_ugt: [ 5.80 6.00 ] +Key: FCMP_uge: [ 6.20 6.40 ] +Key: FCMP_ult: [ 6.60 6.80 ] +Key: FCMP_ule: [ 7.00 7.20 ] +Key: FCMP_une: [ 7.40 7.60 ] +Key: FCMP_true: [ 7.80 8.00 ] +Key: ICMP_eq: [ 8.20 8.40 ] +Key: ICMP_ne: [ 8.60 8.80 ] +Key: ICMP_ugt: [ 9.00 9.20 ] +Key: ICMP_uge: [ 9.40 9.60 ] +Key: ICMP_ult: [ 9.80 10.00 ] +Key: ICMP_ule: [ 10.20 10.40 ] +Key: ICMP_sgt: [ 10.60 10.80 ] +Key: ICMP_sge: [ 11.00 11.20 ] +Key: ICMP_slt: [ 11.40 11.60 ] +Key: ICMP_sle: [ 11.80 12.00 ] diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt index f3ce809..e443adb 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt +++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt @@ -82,3 +82,29 @@ Key: Function: [ 0.50 1.00 ] Key: Pointer: [ 1.50 2.00 ] Key: Constant: [ 2.50 3.00 ] Key: Variable: [ 3.50 4.00 ] +Key: FCMP_false: [ 4.50 5.00 ] +Key: FCMP_oeq: [ 5.50 6.00 ] +Key: FCMP_ogt: [ 6.50 7.00 ] +Key: FCMP_oge: [ 7.50 8.00 ] +Key: FCMP_olt: [ 8.50 9.00 ] +Key: FCMP_ole: [ 9.50 10.00 ] +Key: FCMP_one: [ 10.50 11.00 ] +Key: FCMP_ord: [ 11.50 12.00 ] +Key: FCMP_uno: [ 12.50 13.00 ] +Key: FCMP_ueq: [ 13.50 14.00 ] +Key: FCMP_ugt: [ 14.50 15.00 ] +Key: FCMP_uge: [ 15.50 16.00 ] +Key: FCMP_ult: [ 16.50 17.00 ] +Key: FCMP_ule: [ 17.50 18.00 ] +Key: FCMP_une: [ 18.50 19.00 ] +Key: FCMP_true: [ 19.50 20.00 ] +Key: ICMP_eq: [ 20.50 21.00 ] +Key: ICMP_ne: [ 21.50 22.00 ] +Key: ICMP_ugt: [ 22.50 23.00 ] +Key: ICMP_uge: [ 23.50 24.00 ] +Key: ICMP_ult: [ 24.50 25.00 ] 
+Key: ICMP_ule: [ 25.50 26.00 ] +Key: ICMP_sgt: [ 26.50 27.00 ] +Key: ICMP_sge: [ 27.50 28.00 ] +Key: ICMP_slt: [ 28.50 29.00 ] +Key: ICMP_sle: [ 29.50 30.00 ] diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt index 72b25b9..7fb6043 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt +++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt @@ -82,3 +82,29 @@ Key: Function: [ 0.00 0.00 ] Key: Pointer: [ 0.00 0.00 ] Key: Constant: [ 0.00 0.00 ] Key: Variable: [ 0.00 0.00 ] +Key: FCMP_false: [ 0.00 0.00 ] +Key: FCMP_oeq: [ 0.00 0.00 ] +Key: FCMP_ogt: [ 0.00 0.00 ] +Key: FCMP_oge: [ 0.00 0.00 ] +Key: FCMP_olt: [ 0.00 0.00 ] +Key: FCMP_ole: [ 0.00 0.00 ] +Key: FCMP_one: [ 0.00 0.00 ] +Key: FCMP_ord: [ 0.00 0.00 ] +Key: FCMP_uno: [ 0.00 0.00 ] +Key: FCMP_ueq: [ 0.00 0.00 ] +Key: FCMP_ugt: [ 0.00 0.00 ] +Key: FCMP_uge: [ 0.00 0.00 ] +Key: FCMP_ult: [ 0.00 0.00 ] +Key: FCMP_ule: [ 0.00 0.00 ] +Key: FCMP_une: [ 0.00 0.00 ] +Key: FCMP_true: [ 0.00 0.00 ] +Key: ICMP_eq: [ 0.00 0.00 ] +Key: ICMP_ne: [ 0.00 0.00 ] +Key: ICMP_ugt: [ 0.00 0.00 ] +Key: ICMP_uge: [ 0.00 0.00 ] +Key: ICMP_ult: [ 0.00 0.00 ] +Key: ICMP_ule: [ 0.00 0.00 ] +Key: ICMP_sgt: [ 0.00 0.00 ] +Key: ICMP_sge: [ 0.00 0.00 ] +Key: ICMP_slt: [ 0.00 0.00 ] +Key: ICMP_sle: [ 0.00 0.00 ] diff --git a/llvm/test/Analysis/IR2Vec/if-else.ll b/llvm/test/Analysis/IR2Vec/if-else.ll index fe53247..804c1ca 100644 --- a/llvm/test/Analysis/IR2Vec/if-else.ll +++ b/llvm/test/Analysis/IR2Vec/if-else.ll @@ -29,7 +29,7 @@ return: ; preds = %if.else, %if.then ; CHECK: Basic block vectors: ; CHECK-NEXT: Basic block: entry: -; CHECK-NEXT: [ 816.00 825.00 834.00 ] +; CHECK-NEXT: [ 816.20 825.20 834.20 ] ; CHECK-NEXT: Basic block: if.then: ; CHECK-NEXT: [ 195.00 198.00 201.00 ] ; CHECK-NEXT: Basic block: if.else: diff --git a/llvm/test/Analysis/IR2Vec/unreachable.ll b/llvm/test/Analysis/IR2Vec/unreachable.ll 
index b0e3e49..9be0ee1 100644 --- a/llvm/test/Analysis/IR2Vec/unreachable.ll +++ b/llvm/test/Analysis/IR2Vec/unreachable.ll @@ -33,7 +33,7 @@ return: ; preds = %if.else, %if.then ; CHECK: Basic block vectors: ; CHECK-NEXT: Basic block: entry: -; CHECK-NEXT: [ 816.00 825.00 834.00 ] +; CHECK-NEXT: [ 816.20 825.20 834.20 ] ; CHECK-NEXT: Basic block: if.then: ; CHECK-NEXT: [ 195.00 198.00 201.00 ] ; CHECK-NEXT: Basic block: if.else: diff --git a/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll b/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll index 1e21fbf..e1c6230 100644 --- a/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll +++ b/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll @@ -188,3 +188,43 @@ loop: exit: ret void } + +define noundef i64 @udiv_mul_common_vscale_factor(i64 %a, i64 %b) { +; CHECK-LABEL: 'udiv_mul_common_vscale_factor' +; CHECK-NEXT: Classifying expressions for: @udiv_mul_common_vscale_factor +; CHECK-NEXT: %vs = call i64 @llvm.vscale.i64() +; CHECK-NEXT: --> vscale U: [1,0) S: [1,0) +; CHECK-NEXT: %a.vs = mul i64 %a, %vs +; CHECK-NEXT: --> (vscale * %a) U: full-set S: full-set +; CHECK-NEXT: %b.vs = mul i64 %b, %vs +; CHECK-NEXT: --> (vscale * %b) U: full-set S: full-set +; CHECK-NEXT: %div = udiv i64 %a.vs, %b.vs +; CHECK-NEXT: --> ((vscale * %a) /u (vscale * %b)) U: full-set S: full-set +; CHECK-NEXT: Determining loop execution counts for: @udiv_mul_common_vscale_factor +; + %vs = call i64 @llvm.vscale() + %a.vs = mul i64 %a, %vs + %b.vs = mul i64 %b, %vs + %div = udiv i64 %a.vs, %b.vs + ret i64 %div +} + +define noundef i64 @udiv_mul_nuw_common_vscale_factor(i64 %a, i64 %b) { +; CHECK-LABEL: 'udiv_mul_nuw_common_vscale_factor' +; CHECK-NEXT: Classifying expressions for: @udiv_mul_nuw_common_vscale_factor +; CHECK-NEXT: %vs = call i64 @llvm.vscale.i64() +; CHECK-NEXT: --> vscale U: [1,0) S: [1,0) +; CHECK-NEXT: %a.vs = mul nuw i64 %a, %vs +; CHECK-NEXT: --> (vscale * %a)<nuw> U: full-set S: full-set +; CHECK-NEXT: %b.vs = mul 
nuw i64 %b, %vs +; CHECK-NEXT: --> (vscale * %b)<nuw> U: full-set S: full-set +; CHECK-NEXT: %div = udiv i64 %a.vs, %b.vs +; CHECK-NEXT: --> (%a /u %b) U: full-set S: full-set +; CHECK-NEXT: Determining loop execution counts for: @udiv_mul_nuw_common_vscale_factor +; + %vs = call i64 @llvm.vscale() + %a.vs = mul nuw i64 %a, %vs + %b.vs = mul nuw i64 %b, %vs + %div = udiv i64 %a.vs, %b.vs + ret i64 %div +} diff --git a/llvm/test/CMakeLists.txt b/llvm/test/CMakeLists.txt index 4db7663..32c7c64 100644 --- a/llvm/test/CMakeLists.txt +++ b/llvm/test/CMakeLists.txt @@ -71,7 +71,6 @@ set(LLVM_TEST_DEPENDS ${LLVM_TEST_DEPENDS_COMMON} BugpointPasses LLVMWindowsDriver - UnitTests bugpoint llc lli @@ -270,10 +269,11 @@ add_lit_testsuites(LLVM ${CMAKE_CURRENT_SOURCE_DIR} ${exclude_from_check_all} DEPENDS ${LLVM_TEST_DEPENDS} FOLDER "Tests/Subdirectories" - SKIP "^FileCheck" "^TableGen" + SKIP "^FileCheck" "^TableGen" "^Unit" ) add_subdirectory(FileCheck) add_subdirectory(TableGen) +add_subdirectory(Unit) # Setup an alias for 'check-all'. add_custom_target(check) diff --git a/llvm/test/CodeGen/AArch64/pr161420.ll b/llvm/test/CodeGen/AArch64/pr161420.ll new file mode 100644 index 0000000..515a1bf --- /dev/null +++ b/llvm/test/CodeGen/AArch64/pr161420.ll @@ -0,0 +1,51 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s | FileCheck %s + +target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128-Fn32" +target triple = "arm64-apple-macosx15.0.0" + +; From: https://github.com/llvm/llvm-project/issues/161420. This test checks that +; two `luti4` instructions are emitted. FIXME: This is currently broken! 
+define void @pluto(ptr %arg, ptr %arg1, ptr %arg2, ptr %arg3) #0 { +; CHECK-LABEL: pluto: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: mov w8, #0 ; =0x0 +; CHECK-NEXT: ldr zt0, [x1] +; CHECK-NEXT: ldr z0, [x3] +; CHECK-NEXT: ptrue pn8.h +; CHECK-NEXT: ld1h { z4.h - z7.h }, pn8/z, [x0] +; CHECK-NEXT: luti4 { z0.h - z3.h }, zt0, z0[0] +; CHECK-NEXT: fmla za.h[w8, 2, vgx4], { z4.h - z7.h }, { z0.h - z3.h } +; CHECK-NEXT: ret +bb: + tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %arg1) + %load = load <vscale x 16 x i8>, ptr %arg3, align 16 + %call = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c16() + %call4 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount") %call, ptr %arg) + %extractvalue = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call4, 0 + %extractvalue5 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call4, 1 + %extractvalue6 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call4, 2 + %extractvalue7 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call4, 3 + %call8 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8f16(i32 0, <vscale x 16 x i8> %load, i32 0) + %extractvalue9 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call8, 0 + %extractvalue10 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call8, 1 + %extractvalue11 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call8, 2 + %extractvalue12 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, 
<vscale x 8 x half> } %call8, 3 + tail call void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32 0, <vscale x 8 x half> %extractvalue, <vscale x 8 x half> %extractvalue5, <vscale x 8 x half> %extractvalue6, <vscale x 8 x half> %extractvalue7, <vscale x 8 x half> %extractvalue9, <vscale x 8 x half> %extractvalue10, <vscale x 8 x half> %extractvalue11, <vscale x 8 x half> %extractvalue12) + tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %arg2) + %call13 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8f16(i32 0, <vscale x 16 x i8> %load, i32 0) + %extractvalue14 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call13, 0 + %extractvalue15 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call13, 1 + %extractvalue16 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call13, 2 + %extractvalue17 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call13, 3 + tail call void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32 2, <vscale x 8 x half> %extractvalue, <vscale x 8 x half> %extractvalue5, <vscale x 8 x half> %extractvalue6, <vscale x 8 x half> %extractvalue7, <vscale x 8 x half> %extractvalue14, <vscale x 8 x half> %extractvalue15, <vscale x 8 x half> %extractvalue16, <vscale x 8 x half> %extractvalue17) + ret void +} + +declare void @llvm.aarch64.sme.ldr.zt(i32, ptr) +declare target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c16() +declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount"), ptr) +declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8f16(i32 immarg, <vscale x 16 x i8>, i32 immarg) +declare 
void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>) + +attributes #0 = { mustprogress nofree noinline norecurse nosync nounwind ssp willreturn uwtable(sync) "aarch64_inout_za" "aarch64_inout_zt0" "aarch64_pstate_sm_enabled" "target-cpu"="apple-m1" "target-features"="+fp-armv8,+lse,+neon,+sme,+sme-f16f16,+sme2,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a" } diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll index 92d3e11..cf306e52 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll @@ -48,6 +48,24 @@ define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscal ret {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res } +; Tests multiple identical luti4 intrinsics with ZT0 loads interspersed, are not CSD'd. +; FIXME: This is currently broken! +define void @test_multiple_luti4_zt_i8(ptr %ptrA, ptr %ptrB, <vscale x 16 x i8> %x) { +; CHECK-LABEL: test_multiple_luti4_zt_i8: +; CHECK: // %bb.0: +; CHECK-NEXT: luti4 { z0.s - z3.s }, zt0, z0[1] +; CHECK-NEXT: // fake_use: $z0 $z0_z1_z2_z3 +; CHECK-NEXT: ret + tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrA) + %res1 = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4f32(i32 0, <vscale x 16 x i8> %x, i32 1) + tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrB) + %res2 = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4f32(i32 0, <vscale x 16 x i8> %x, i32 1) + + call void (...) 
@llvm.fake.use({<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res1) + call void (...) @llvm.fake.use({<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res2) + ret void +} + declare {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8i16(i32, <vscale x 16 x i8>, i32) declare {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4i32(i32, <vscale x 16 x i8>, i32) declare {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8bf16(i32, <vscale x 16 x i8>, i32) diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll index 778f311..0024b70 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll @@ -14,4 +14,24 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %res } +; Tests multiple identical luti4 intrinsics with ZT0 loads interspersed, are not CSD'd. +; FIXME: This is currently broken! 
+define void @test_multiple_luti4_zt_i8(ptr %ptrA, ptr %ptrB, <vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1) #0 { +; CHECK-LABEL: test_multiple_luti4_zt_i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1 +; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1 +; CHECK-NEXT: luti4 { z0.b - z3.b }, zt0, { z0, z1 } +; CHECK-NEXT: // fake_use: $z0 $z0_z1_z2_z3 +; CHECK-NEXT: ret + tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrA) + %res1 = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.luti4.zt.x4.nxv16i8(i32 0, <vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1) + tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrB) + %res2 = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.luti4.zt.x4.nxv16i8(i32 0, <vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1) + + call void (...) @llvm.fake.use({ <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res1) + call void (...) 
@llvm.fake.use({ <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res2) + ret void +} + attributes #0 = { "target-features"="+sme2,+sme-lutv2"} diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll index 666523c..ff618c0 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll @@ -1812,26 +1812,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_mov_b32_e32 v6, s16 -; GFX12-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1 +; GFX12-NEXT: v_mov_b32_e32 v8, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] ; GFX12-NEXT: s_mov_b32 s4, 0 -; GFX12-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], null offen -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] +; GFX12-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], null offen ; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 
+; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1854,27 +1854,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_mov_b32_e32 v6, s16 -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1 +; GFX11-NEXT: v_mov_b32_e32 v8, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], 0 offen -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX11-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], 0 offen ; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: 
v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1906,28 +1906,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v6, s20 -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX908-NEXT: v_mov_b32_e32 v8, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: 
v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB14_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1937,28 +1935,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v6, s20 -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX8-NEXT: v_mov_b32_e32 v8, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX8-NEXT: 
s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB14_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll index 3515028..007417c 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll @@ -1812,26 +1812,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_mov_b32_e32 v6, s16 -; GFX12-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1 +; GFX12-NEXT: v_mov_b32_e32 v8, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] ; GFX12-NEXT: s_mov_b32 s4, 0 -; GFX12-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], null offen -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] +; GFX12-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], null offen ; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen 
th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1854,27 +1854,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_mov_b32_e32 v6, s16 -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1 +; GFX11-NEXT: v_mov_b32_e32 v8, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], 0 offen -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX11-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], 0 offen ; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; 
GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1906,28 +1906,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v6, s20 -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX908-NEXT: v_mov_b32_e32 v8, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: 
v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB14_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1937,28 +1935,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v6, s20 -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX8-NEXT: v_mov_b32_e32 v8, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: 
v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB14_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll index ba5a8e9..9e412b6 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll @@ -209,48 +209,48 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1) ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2 ; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v2 ; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s10, v3, v[1:2] -; GFX8-NEXT: v_mov_b32_e32 v6, s9 -; GFX8-NEXT: v_mov_b32_e32 v5, s11 +; GFX8-NEXT: v_sub_u32_e32 v6, vcc, s8, v0 ; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s11, v4, v[1:2] -; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s8, v0 -; GFX8-NEXT: v_subb_u32_e64 v6, s[0:1], v6, v1, vcc +; GFX8-NEXT: v_mov_b32_e32 v2, s9 +; GFX8-NEXT: v_mov_b32_e32 v5, s11 +; GFX8-NEXT: v_subb_u32_e64 v7, s[0:1], v2, v1, vcc ; GFX8-NEXT: v_sub_u32_e64 v0, s[0:1], s9, v1 -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v6 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v7 ; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v2 -; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v6 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v6 +; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1] +; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v7 ; GFX8-NEXT: v_subb_u32_e32 v0, vcc, 
v0, v5, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v7, s[0:1] -; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s10, v2 +; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[0:1] +; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s10, v6 ; GFX8-NEXT: v_subbrev_u32_e64 v8, s[0:1], 0, v0, vcc ; GFX8-NEXT: v_add_u32_e64 v9, s[0:1], 1, v4 ; GFX8-NEXT: v_addc_u32_e64 v10, s[0:1], 0, v3, s[0:1] ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v7 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v2 ; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc ; GFX8-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[0:1] ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v8 -; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s10, v7 +; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s10, v2 ; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v12, s[0:1] ; GFX8-NEXT: v_add_u32_e64 v12, s[0:1], 1, v9 ; GFX8-NEXT: v_subbrev_u32_e32 v14, vcc, 0, v0, vcc ; GFX8-NEXT: v_addc_u32_e64 v13, s[0:1], 0, v10, s[0:1] ; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11 ; GFX8-NEXT: v_cndmask_b32_e32 v0, v9, v12, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v9, v10, v13, vcc ; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1 +; GFX8-NEXT: v_cndmask_b32_e32 v9, v10, v13, vcc ; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, v0, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, v9, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v5, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v4, v8, v14, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v4, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc ; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, v9, s[0:1] ; GFX8-NEXT: v_mov_b32_e32 v5, s5 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v8, v14, vcc ; GFX8-NEXT: flat_store_dwordx2 v[4:5], v[0:1] ; GFX8-NEXT: v_mov_b32_e32 v0, s6 +; GFX8-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[0:1] ; GFX8-NEXT: v_mov_b32_e32 v1, s7 ; GFX8-NEXT: 
flat_store_dwordx2 v[0:1], v[2:3] ; GFX8-NEXT: s_endpgm @@ -299,7 +299,6 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1) ; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v0 ; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v4, v1, vcc ; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s2, v3, 0 -; GFX9-NEXT: v_mov_b32_e32 v7, s19 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s2, v4, v[1:2] ; GFX9-NEXT: v_mul_hi_u32 v6, v3, v0 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s3, v3, v[1:2] @@ -346,30 +345,30 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1) ; GFX9-NEXT: v_add_u32_e32 v3, v4, v3 ; GFX9-NEXT: v_add3_u32 v3, v3, v2, v6 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s18, v3, v[1:2] -; GFX9-NEXT: v_mov_b32_e32 v6, s17 -; GFX9-NEXT: v_mov_b32_e32 v4, 0 +; GFX9-NEXT: v_sub_co_u32_e32 v7, vcc, s16, v0 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s19, v5, v[1:2] -; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, s16, v0 -; GFX9-NEXT: v_subb_co_u32_e64 v6, s[0:1], v6, v1, vcc -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v6 +; GFX9-NEXT: v_mov_b32_e32 v2, s17 +; GFX9-NEXT: v_mov_b32_e32 v4, s19 +; GFX9-NEXT: v_subb_co_u32_e64 v8, s[0:1], v2, v1, vcc +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v8 ; GFX9-NEXT: v_sub_u32_e32 v0, s17, v1 ; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v2 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s19, v6 -; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v7, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[0:1] -; GFX9-NEXT: v_subrev_co_u32_e32 v8, vcc, s18, v2 +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1] +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s19, v8 +; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v4, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[0:1] +; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s18, v7 ; GFX9-NEXT: v_subbrev_co_u32_e64 v9, s[0:1], 0, v0, vcc ; GFX9-NEXT: 
v_add_co_u32_e64 v10, s[0:1], 1, v5 ; GFX9-NEXT: v_addc_co_u32_e64 v11, s[0:1], 0, v3, s[0:1] ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v9 ; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v8 -; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v7, vcc +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v2 +; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v4, vcc ; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1] ; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s19, v9 -; GFX9-NEXT: v_subrev_co_u32_e32 v7, vcc, s18, v8 +; GFX9-NEXT: v_subrev_co_u32_e32 v4, vcc, s18, v2 ; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, v13, s[0:1] ; GFX9-NEXT: v_add_co_u32_e64 v13, s[0:1], 1, v10 ; GFX9-NEXT: v_subbrev_co_u32_e32 v15, vcc, 0, v0, vcc @@ -378,14 +377,15 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1) ; GFX9-NEXT: v_cndmask_b32_e32 v0, v10, v13, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v10, v11, v14, vcc ; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1 +; GFX9-NEXT: v_mov_b32_e32 v6, 0 ; GFX9-NEXT: v_cndmask_b32_e64 v0, v5, v0, s[0:1] ; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, v10, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e32 v3, v8, v7, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v15, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v3, v6, v5, s[0:1] -; GFX9-NEXT: global_store_dwordx2 v4, v[0:1], s[12:13] -; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[14:15] +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v9, v15, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v2, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v3, s[0:1] +; GFX9-NEXT: global_store_dwordx2 v6, v[0:1], s[12:13] +; GFX9-NEXT: global_store_dwordx2 v6, v[2:3], s[14:15] ; GFX9-NEXT: s_endpgm ; ; GFX10-LABEL: udivrem_i64: @@ -1070,6 +1070,7 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1 ; GFX8-NEXT: v_mul_lo_u32 v3, s8, v1 ; GFX8-NEXT: v_mul_hi_u32 v4, s8, v0 ; GFX8-NEXT: 
v_mul_hi_u32 v0, s9, v0 +; GFX8-NEXT: v_mov_b32_e32 v5, s13 ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3 ; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4 @@ -1082,184 +1083,183 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1 ; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v3 ; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc ; GFX8-NEXT: v_add_u32_e32 v3, vcc, v4, v3 -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v0, v2 +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v0, v2 ; GFX8-NEXT: v_mul_hi_u32 v4, s9, v1 -; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s12, v6, 0 +; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s12, v7, 0 ; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2 -; GFX8-NEXT: v_add_u32_e32 v7, vcc, v4, v2 -; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s12, v7, v[1:2] +; GFX8-NEXT: v_add_u32_e32 v8, vcc, v4, v2 +; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s12, v8, v[1:2] +; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s13, v7, v[1:2] ; GFX8-NEXT: v_mov_b32_e32 v3, s9 -; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s8, v0 -; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s13, v6, v[1:2] -; GFX8-NEXT: v_mov_b32_e32 v4, s13 -; GFX8-NEXT: v_subb_u32_e64 v0, s[0:1], v3, v1, vcc -; GFX8-NEXT: v_sub_u32_e64 v1, s[0:1], s9, v1 +; GFX8-NEXT: v_sub_u32_e32 v1, vcc, s8, v0 +; GFX8-NEXT: v_subb_u32_e64 v0, s[0:1], v3, v2, vcc +; GFX8-NEXT: v_sub_u32_e64 v2, s[0:1], s9, v2 ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v0 -; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[0:1] +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v1 +; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1] ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s13, v0 -; GFX8-NEXT: v_cndmask_b32_e64 v9, v2, v3, s[0:1] -; GFX8-NEXT: v_cvt_f32_u32_e32 v2, s15 -; GFX8-NEXT: v_cvt_f32_u32_e32 v3, s14 -; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v1, v4, vcc -; GFX8-NEXT: v_mul_f32_e32 v1, 0x4f800000, 
v2 -; GFX8-NEXT: v_add_f32_e32 v1, v1, v3 -; GFX8-NEXT: v_rcp_iflag_f32_e32 v1, v1 -; GFX8-NEXT: v_subrev_u32_e32 v10, vcc, s12, v8 -; GFX8-NEXT: v_subbrev_u32_e64 v11, s[0:1], 0, v5, vcc -; GFX8-NEXT: v_mul_f32_e32 v1, 0x5f7ffffc, v1 -; GFX8-NEXT: v_mul_f32_e32 v2, 0x2f800000, v1 -; GFX8-NEXT: v_trunc_f32_e32 v3, v2 -; GFX8-NEXT: v_mul_f32_e32 v2, 0xcf800000, v3 -; GFX8-NEXT: v_add_f32_e32 v1, v2, v1 -; GFX8-NEXT: v_cvt_u32_f32_e32 v12, v1 -; GFX8-NEXT: v_add_u32_e64 v13, s[0:1], 1, v6 -; GFX8-NEXT: v_addc_u32_e64 v14, s[0:1], 0, v7, s[0:1] -; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s2, v12, 0 -; GFX8-NEXT: v_cvt_u32_f32_e32 v15, v3 +; GFX8-NEXT: v_cndmask_b32_e64 v9, v3, v4, s[0:1] +; GFX8-NEXT: v_cvt_f32_u32_e32 v3, s15 +; GFX8-NEXT: v_cvt_f32_u32_e32 v4, s14 +; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v2, v5, vcc +; GFX8-NEXT: v_mul_f32_e32 v2, 0x4f800000, v3 +; GFX8-NEXT: v_add_f32_e32 v2, v2, v4 +; GFX8-NEXT: v_rcp_iflag_f32_e32 v2, v2 +; GFX8-NEXT: v_subrev_u32_e32 v10, vcc, s12, v1 +; GFX8-NEXT: v_subbrev_u32_e64 v11, s[0:1], 0, v6, vcc +; GFX8-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2 +; GFX8-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2 +; GFX8-NEXT: v_trunc_f32_e32 v4, v3 +; GFX8-NEXT: v_mul_f32_e32 v3, 0xcf800000, v4 +; GFX8-NEXT: v_add_f32_e32 v2, v3, v2 +; GFX8-NEXT: v_cvt_u32_f32_e32 v12, v2 +; GFX8-NEXT: v_add_u32_e64 v13, s[0:1], 1, v7 +; GFX8-NEXT: v_addc_u32_e64 v14, s[0:1], 0, v8, s[0:1] +; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v12, 0 +; GFX8-NEXT: v_cvt_u32_f32_e32 v15, v4 ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v11 ; GFX8-NEXT: v_cndmask_b32_e64 v16, 0, -1, s[0:1] -; GFX8-NEXT: v_subb_u32_e32 v4, vcc, v5, v4, vcc -; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v15, v[2:3] +; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v6, v5, vcc +; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v15, v[3:4] ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v10 ; GFX8-NEXT: v_cndmask_b32_e64 v17, 0, -1, s[0:1] -; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s3, v12, v[2:3] +; GFX8-NEXT: 
v_mad_u64_u32 v[3:4], s[0:1], s3, v12, v[3:4] ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s13, v11 ; GFX8-NEXT: v_cndmask_b32_e64 v16, v16, v17, s[0:1] -; GFX8-NEXT: v_mul_lo_u32 v3, v15, v1 -; GFX8-NEXT: v_mul_lo_u32 v17, v12, v2 -; GFX8-NEXT: v_mul_hi_u32 v5, v12, v1 -; GFX8-NEXT: v_mul_hi_u32 v1, v15, v1 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v17 +; GFX8-NEXT: v_mul_lo_u32 v4, v15, v2 +; GFX8-NEXT: v_mul_lo_u32 v17, v12, v3 +; GFX8-NEXT: v_mul_hi_u32 v6, v12, v2 +; GFX8-NEXT: v_mul_hi_u32 v2, v15, v2 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v17 ; GFX8-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v5 -; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc -; GFX8-NEXT: v_mul_lo_u32 v5, v15, v2 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v17, v3 -; GFX8-NEXT: v_mul_hi_u32 v17, v12, v2 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, v5, v1 -; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v17 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v6 +; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc +; GFX8-NEXT: v_mul_lo_u32 v6, v15, v3 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v17, v4 +; GFX8-NEXT: v_mul_hi_u32 v17, v12, v3 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, v6, v2 +; GFX8-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v17 ; GFX8-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v17 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v17 ; GFX8-NEXT: v_add_u32_e32 v17, vcc, 1, v13 ; GFX8-NEXT: v_addc_u32_e32 v18, vcc, 0, v14, vcc ; GFX8-NEXT: v_subrev_u32_e32 v19, vcc, s12, v10 -; GFX8-NEXT: v_mul_hi_u32 v2, v15, v2 -; GFX8-NEXT: v_subbrev_u32_e32 v20, vcc, 0, v4, vcc -; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v3 -; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v3 -; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3 -; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v1 -; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v12, 0 -; GFX8-NEXT: v_addc_u32_e32 v15, vcc, v15, v2, vcc +; 
GFX8-NEXT: v_mul_hi_u32 v3, v15, v3 +; GFX8-NEXT: v_subbrev_u32_e32 v20, vcc, 0, v5, vcc +; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4 +; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v6, v4 +; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v4 +; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v2 +; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v12, 0 +; GFX8-NEXT: v_addc_u32_e32 v15, vcc, v15, v3, vcc ; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16 -; GFX8-NEXT: v_cndmask_b32_e32 v2, v13, v17, vcc -; GFX8-NEXT: v_mov_b32_e32 v1, v4 -; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v15, v[1:2] +; GFX8-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc +; GFX8-NEXT: v_mov_b32_e32 v2, v5 +; GFX8-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s2, v15, v[2:3] ; GFX8-NEXT: v_cndmask_b32_e32 v13, v14, v18, vcc ; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v9 -; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[2:3], s3, v12, v[4:5] -; GFX8-NEXT: v_cndmask_b32_e64 v1, v6, v2, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v2, v7, v13, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v19, vcc -; GFX8-NEXT: v_mul_lo_u32 v7, v15, v3 -; GFX8-NEXT: v_mul_lo_u32 v9, v12, v4 -; GFX8-NEXT: v_cndmask_b32_e64 v5, v8, v5, s[0:1] -; GFX8-NEXT: v_mul_hi_u32 v8, v12, v3 -; GFX8-NEXT: v_cndmask_b32_e32 v6, v11, v20, vcc -; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v9 -; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8 -; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc -; GFX8-NEXT: v_mul_lo_u32 v8, v15, v4 -; GFX8-NEXT: v_mul_hi_u32 v3, v15, v3 -; GFX8-NEXT: v_add_u32_e32 v7, vcc, v9, v7 +; GFX8-NEXT: v_mad_u64_u32 v[5:6], s[2:3], s3, v12, v[5:6] +; GFX8-NEXT: v_cndmask_b32_e64 v2, v7, v3, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v8, v13, s[0:1] +; GFX8-NEXT: v_mul_lo_u32 v7, v15, v4 +; GFX8-NEXT: v_mul_lo_u32 v8, v12, v5 ; GFX8-NEXT: v_mul_hi_u32 v9, v12, v4 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v8, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v19, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v10, v11, v20, 
vcc +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v9 -; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v9 -; GFX8-NEXT: v_mul_hi_u32 v4, v15, v4 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v7 +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v9 ; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc +; GFX8-NEXT: v_mul_lo_u32 v9, v15, v5 +; GFX8-NEXT: v_mul_hi_u32 v4, v15, v4 ; GFX8-NEXT: v_add_u32_e32 v7, vcc, v8, v7 +; GFX8-NEXT: v_mul_hi_u32 v8, v12, v5 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v9, v4 +; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v8 +; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v8, vcc, v9, v8 +; GFX8-NEXT: v_mul_hi_u32 v5, v15, v5 ; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v7 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v12, v3 -; GFX8-NEXT: v_addc_u32_e32 v4, vcc, v15, v4, vcc -; GFX8-NEXT: v_mul_lo_u32 v7, s11, v3 -; GFX8-NEXT: v_mul_lo_u32 v8, s10, v4 -; GFX8-NEXT: v_cndmask_b32_e64 v6, v0, v6, s[0:1] -; GFX8-NEXT: v_mul_hi_u32 v0, s10, v3 -; GFX8-NEXT: v_mul_hi_u32 v3, s11, v3 +; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v8, v7 +; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v7 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v12, v4 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v15, v5, vcc +; GFX8-NEXT: v_mul_lo_u32 v7, s11, v4 +; GFX8-NEXT: v_mul_lo_u32 v8, s10, v5 +; GFX8-NEXT: v_cndmask_b32_e64 v6, v1, v6, s[0:1] +; GFX8-NEXT: v_mul_hi_u32 v1, s10, v4 +; GFX8-NEXT: v_mul_hi_u32 v4, s11, v4 ; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v0, vcc, v7, v0 -; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc -; GFX8-NEXT: v_mul_lo_u32 v7, s11, v4 -; GFX8-NEXT: v_add_u32_e32 v0, vcc, v8, v0 -; GFX8-NEXT: v_mul_hi_u32 v8, s10, v4 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v7, v3 +; GFX8-NEXT: v_add_u32_e32 v1, vcc, v7, v1 +; 
GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc +; GFX8-NEXT: v_mul_lo_u32 v7, s11, v5 +; GFX8-NEXT: v_add_u32_e32 v1, vcc, v8, v1 +; GFX8-NEXT: v_mul_hi_u32 v8, s10, v5 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v7, v4 ; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v8 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc ; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8 -; GFX8-NEXT: v_add_u32_e32 v9, vcc, v3, v0 -; GFX8-NEXT: v_mul_hi_u32 v8, s11, v4 -; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s14, v9, 0 -; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v0, vcc, v7, v0 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, v8, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, v4 -; GFX8-NEXT: v_mad_u64_u32 v[7:8], s[0:1], s14, v10, v[0:1] -; GFX8-NEXT: v_mov_b32_e32 v4, s11 -; GFX8-NEXT: v_mov_b32_e32 v0, s15 -; GFX8-NEXT: v_mad_u64_u32 v[7:8], s[0:1], s15, v9, v[7:8] -; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s10, v3 -; GFX8-NEXT: v_subb_u32_e64 v11, s[0:1], v4, v7, vcc -; GFX8-NEXT: v_sub_u32_e64 v3, s[0:1], s11, v7 -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v11 +; GFX8-NEXT: v_add_u32_e32 v11, vcc, v4, v1 +; GFX8-NEXT: v_mul_hi_u32 v8, s11, v5 +; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[2:3], s14, v11, 0 +; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v1, vcc, v7, v1 +; GFX8-NEXT: v_add_u32_e32 v12, vcc, v8, v1 +; GFX8-NEXT: v_mov_b32_e32 v1, v5 +; GFX8-NEXT: v_mad_u64_u32 v[8:9], s[2:3], s14, v12, v[1:2] +; GFX8-NEXT: v_cndmask_b32_e64 v7, v0, v10, s[0:1] +; GFX8-NEXT: v_mov_b32_e32 v5, s15 +; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s15, v11, v[8:9] +; GFX8-NEXT: v_mov_b32_e32 v1, s11 +; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s10, v4 +; GFX8-NEXT: v_subb_u32_e64 v1, s[0:1], v1, v0, vcc +; GFX8-NEXT: v_sub_u32_e64 v0, s[0:1], s11, v0 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v1 ; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1] ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v8 -; GFX8-NEXT: 
v_cndmask_b32_e64 v7, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v11 -; GFX8-NEXT: v_subb_u32_e32 v3, vcc, v3, v0, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v7, s[0:1] -; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s14, v8 -; GFX8-NEXT: v_subbrev_u32_e64 v12, s[0:1], 0, v3, vcc -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v12 +; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[0:1] +; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v1 +; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[0:1] +; GFX8-NEXT: v_subrev_u32_e32 v9, vcc, s14, v8 +; GFX8-NEXT: v_subbrev_u32_e64 v10, s[0:1], 0, v0, vcc +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v10 ; GFX8-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v7 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v9 ; GFX8-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v12 +; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v10 ; GFX8-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[0:1] -; GFX8-NEXT: v_add_u32_e64 v14, s[0:1], 1, v9 -; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v3, v0, vcc -; GFX8-NEXT: v_addc_u32_e64 v15, s[0:1], 0, v10, s[0:1] -; GFX8-NEXT: v_add_u32_e32 v3, vcc, 1, v14 +; GFX8-NEXT: v_add_u32_e64 v14, s[0:1], 1, v11 +; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc +; GFX8-NEXT: v_addc_u32_e64 v15, s[0:1], 0, v12, s[0:1] +; GFX8-NEXT: v_add_u32_e32 v5, vcc, 1, v14 ; GFX8-NEXT: v_addc_u32_e32 v16, vcc, 0, v15, vcc ; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13 -; GFX8-NEXT: v_subrev_u32_e64 v13, s[0:1], s14, v7 +; GFX8-NEXT: v_subrev_u32_e64 v13, s[0:1], s14, v9 ; GFX8-NEXT: v_subbrev_u32_e64 v0, s[0:1], 0, v0, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e32 v3, v14, v3, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc ; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4 -; GFX8-NEXT: v_cndmask_b32_e64 v3, v9, v3, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v4, v10, v14, s[0:1] -; GFX8-NEXT: v_mov_b32_e32 v10, s5 -; GFX8-NEXT: v_cndmask_b32_e32 v7, 
v7, v13, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v0, v12, v0, vcc -; GFX8-NEXT: v_mov_b32_e32 v9, s4 -; GFX8-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v8, v11, v0, s[0:1] -; GFX8-NEXT: flat_store_dwordx4 v[9:10], v[1:4] +; GFX8-NEXT: v_cndmask_b32_e32 v9, v9, v13, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v0, v10, v0, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v5, v14, v5, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v9, v1, v0, s[0:1] +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: v_cndmask_b32_e64 v4, v11, v5, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v5, v12, v14, s[0:1] +; GFX8-NEXT: v_mov_b32_e32 v1, s5 +; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[2:5] ; GFX8-NEXT: v_mov_b32_e32 v0, s6 ; GFX8-NEXT: v_mov_b32_e32 v1, s7 -; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[5:8] +; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[6:9] ; GFX8-NEXT: s_endpgm ; ; GFX9-LABEL: udivrem_v2i64: @@ -1355,11 +1355,11 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1 ; GFX9-NEXT: v_add_u32_e32 v3, v4, v3 ; GFX9-NEXT: v_add3_u32 v8, v3, v2, v5 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s4, v8, v[1:2] -; GFX9-NEXT: v_mov_b32_e32 v4, s17 ; GFX9-NEXT: v_mov_b32_e32 v5, s5 ; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s5, v7, v[1:2] +; GFX9-NEXT: v_mov_b32_e32 v3, s17 ; GFX9-NEXT: v_sub_co_u32_e32 v1, vcc, s16, v0 -; GFX9-NEXT: v_subb_co_u32_e64 v0, s[0:1], v4, v2, vcc +; GFX9-NEXT: v_subb_co_u32_e64 v0, s[0:1], v3, v2, vcc ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s5, v0 ; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[0:1] ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s4, v1 @@ -1387,7 +1387,7 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1 ; GFX9-NEXT: v_cvt_u32_f32_e32 v15, v4 ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s5, v11 ; GFX9-NEXT: v_cndmask_b32_e64 v16, 0, -1, s[0:1] -; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v6, v5, vcc +; GFX9-NEXT: 
v_subb_co_u32_e32 v6, vcc, v6, v5, vcc ; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v15, v[3:4] ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s4, v10 ; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, -1, s[0:1] @@ -1396,128 +1396,128 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1 ; GFX9-NEXT: v_cndmask_b32_e64 v16, v16, v17, s[0:1] ; GFX9-NEXT: v_mul_lo_u32 v4, v15, v2 ; GFX9-NEXT: v_mul_lo_u32 v17, v12, v3 -; GFX9-NEXT: v_mul_hi_u32 v6, v12, v2 +; GFX9-NEXT: v_mul_hi_u32 v5, v12, v2 ; GFX9-NEXT: v_mul_hi_u32 v2, v15, v2 ; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v17 ; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v6 +; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v5 ; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc -; GFX9-NEXT: v_mul_lo_u32 v6, v15, v3 +; GFX9-NEXT: v_mul_lo_u32 v5, v15, v3 ; GFX9-NEXT: v_add_u32_e32 v4, v17, v4 ; GFX9-NEXT: v_mul_hi_u32 v17, v12, v3 ; GFX9-NEXT: v_mul_hi_u32 v3, v15, v3 -; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v2 -; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v5, v2 +; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v17 ; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc -; GFX9-NEXT: v_add_u32_e32 v6, v6, v17 +; GFX9-NEXT: v_add_u32_e32 v5, v5, v17 ; GFX9-NEXT: v_add_co_u32_e32 v17, vcc, 1, v13 ; GFX9-NEXT: v_addc_co_u32_e32 v18, vcc, 0, v14, vcc -; GFX9-NEXT: v_subrev_co_u32_e32 v19, vcc, s4, v10 -; GFX9-NEXT: v_subbrev_co_u32_e32 v20, vcc, 0, v5, vcc ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4 ; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc ; GFX9-NEXT: v_add_co_u32_e32 v12, vcc, v12, v2 -; GFX9-NEXT: v_add3_u32 v3, v6, v4, v3 +; GFX9-NEXT: v_add3_u32 v3, v5, v4, v3 ; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v12, 0 ; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, v15, v3, vcc -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc ; GFX9-NEXT: v_mov_b32_e32 v2, v5 -; 
GFX9-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s2, v15, v[2:3] -; GFX9-NEXT: v_cndmask_b32_e32 v13, v14, v18, vcc +; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v15, v[2:3] +; GFX9-NEXT: v_subrev_co_u32_e32 v19, vcc, s4, v10 +; GFX9-NEXT: v_subbrev_co_u32_e32 v20, vcc, 0, v6, vcc +; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s3, v12, v[2:3] +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16 +; GFX9-NEXT: v_cndmask_b32_e32 v13, v13, v17, vcc ; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v9 -; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[2:3], s3, v12, v[5:6] -; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v3, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v13, s[0:1] -; GFX9-NEXT: v_mul_lo_u32 v7, v15, v4 -; GFX9-NEXT: v_mul_lo_u32 v8, v12, v5 -; GFX9-NEXT: v_cndmask_b32_e32 v6, v10, v19, vcc -; GFX9-NEXT: v_mul_hi_u32 v10, v12, v4 -; GFX9-NEXT: v_cndmask_b32_e32 v9, v11, v20, vcc -; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v8 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v10 -; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc -; GFX9-NEXT: v_mul_lo_u32 v10, v15, v5 +; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v13, s[0:1] +; GFX9-NEXT: v_mul_lo_u32 v6, v15, v4 +; GFX9-NEXT: v_mul_lo_u32 v7, v12, v5 +; GFX9-NEXT: v_mul_hi_u32 v9, v12, v4 ; GFX9-NEXT: v_mul_hi_u32 v4, v15, v4 -; GFX9-NEXT: v_add_u32_e32 v7, v8, v7 -; GFX9-NEXT: v_mul_hi_u32 v8, v12, v5 +; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v18, vcc +; GFX9-NEXT: v_add_co_u32_e64 v6, s[2:3], v6, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[2:3] +; GFX9-NEXT: v_add_co_u32_e64 v6, s[2:3], v6, v9 +; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[2:3] +; GFX9-NEXT: v_mul_lo_u32 v9, v15, v5 +; GFX9-NEXT: v_add_u32_e32 v6, v7, v6 +; GFX9-NEXT: v_mul_hi_u32 v7, v12, v5 ; GFX9-NEXT: v_mul_hi_u32 v5, v15, v5 -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4 -; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, 
v4, v7 -; GFX9-NEXT: v_add_u32_e32 v8, v10, v8 +; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v9, v4 +; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[2:3] +; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v4, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[2:3] +; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v4, v6 +; GFX9-NEXT: v_add_u32_e32 v7, v9, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[2:3] +; GFX9-NEXT: v_add3_u32 v5, v7, v6, v5 +; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v12, v4 +; GFX9-NEXT: v_addc_co_u32_e64 v5, s[2:3], v15, v5, s[2:3] +; GFX9-NEXT: v_mul_lo_u32 v6, s19, v4 +; GFX9-NEXT: v_mul_lo_u32 v7, s18, v5 +; GFX9-NEXT: v_mul_hi_u32 v9, s18, v4 +; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v14, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e32 v8, v10, v19, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v10, v11, v20, vcc +; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v7 ; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc -; GFX9-NEXT: v_add3_u32 v5, v8, v7, v5 -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v12, v4 -; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc -; GFX9-NEXT: v_mul_lo_u32 v7, s19, v4 -; GFX9-NEXT: v_mul_lo_u32 v8, s18, v5 -; GFX9-NEXT: v_cndmask_b32_e64 v6, v1, v6, s[0:1] -; GFX9-NEXT: v_mul_hi_u32 v1, s18, v4 +; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v9 +; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc +; GFX9-NEXT: v_mul_lo_u32 v9, s19, v5 ; GFX9-NEXT: v_mul_hi_u32 v4, s19, v4 -; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v8 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v7, v1 -; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc -; GFX9-NEXT: v_mul_lo_u32 v7, s19, v5 -; GFX9-NEXT: v_add_u32_e32 v1, v8, v1 -; GFX9-NEXT: v_mul_hi_u32 v8, s18, v5 -; GFX9-NEXT: v_mul_hi_u32 v12, s19, v5 -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v7, v4 -; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, v4, v1 +; GFX9-NEXT: v_add_u32_e32 v6, v7, v6 +; 
GFX9-NEXT: v_mul_hi_u32 v7, s18, v5 +; GFX9-NEXT: v_mul_hi_u32 v13, s19, v5 +; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v9, v4 +; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc +; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc +; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, v4, v6 ; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[2:3], s6, v11, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v7, v0, v9, s[0:1] -; GFX9-NEXT: v_add_u32_e32 v0, v10, v8 -; GFX9-NEXT: v_add3_u32 v8, v0, v1, v12 -; GFX9-NEXT: v_mov_b32_e32 v0, v5 -; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s6, v8, v[0:1] -; GFX9-NEXT: v_mov_b32_e32 v9, s19 +; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v6, v1, v8, s[0:1] +; GFX9-NEXT: v_add_u32_e32 v1, v9, v7 +; GFX9-NEXT: v_add3_u32 v12, v1, v12, v13 +; GFX9-NEXT: v_mov_b32_e32 v1, v5 +; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[2:3], s6, v12, v[1:2] +; GFX9-NEXT: v_cndmask_b32_e64 v7, v0, v10, s[0:1] ; GFX9-NEXT: v_mov_b32_e32 v5, s7 -; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s7, v11, v[0:1] -; GFX9-NEXT: v_sub_co_u32_e32 v1, vcc, s18, v4 -; GFX9-NEXT: v_subb_co_u32_e64 v9, s[0:1], v9, v0, vcc -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v9 +; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s7, v11, v[8:9] +; GFX9-NEXT: v_mov_b32_e32 v1, s19 +; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, s18, v4 +; GFX9-NEXT: v_subb_co_u32_e64 v1, s[0:1], v1, v0, vcc +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v1 ; GFX9-NEXT: v_sub_u32_e32 v0, s19, v0 ; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v1 -; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v9 +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v8 +; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[0:1] +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v1 ; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v5, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[0:1] -; GFX9-NEXT: v_subrev_co_u32_e32 
v10, vcc, s6, v1 -; GFX9-NEXT: v_subbrev_co_u32_e64 v12, s[0:1], 0, v0, vcc -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v12 +; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[0:1] +; GFX9-NEXT: v_subrev_co_u32_e32 v9, vcc, s6, v8 +; GFX9-NEXT: v_subbrev_co_u32_e64 v10, s[0:1], 0, v0, vcc +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v10 ; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v10 +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v9 ; GFX9-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v12 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v10 ; GFX9-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[0:1] ; GFX9-NEXT: v_add_co_u32_e64 v14, s[0:1], 1, v11 ; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v5, vcc -; GFX9-NEXT: v_addc_co_u32_e64 v15, s[0:1], 0, v8, s[0:1] +; GFX9-NEXT: v_addc_co_u32_e64 v15, s[0:1], 0, v12, s[0:1] ; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, 1, v14 ; GFX9-NEXT: v_addc_co_u32_e32 v16, vcc, 0, v15, vcc ; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13 ; GFX9-NEXT: v_cndmask_b32_e32 v5, v14, v5, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc -; GFX9-NEXT: v_subrev_co_u32_e64 v15, s[0:1], s6, v10 +; GFX9-NEXT: v_subrev_co_u32_e64 v15, s[0:1], s6, v9 ; GFX9-NEXT: v_subbrev_co_u32_e64 v0, s[0:1], 0, v0, s[0:1] ; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4 ; GFX9-NEXT: v_mov_b32_e32 v13, 0 ; GFX9-NEXT: v_cndmask_b32_e64 v4, v11, v5, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v5, v8, v14, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e32 v8, v10, v15, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v0, v12, v0, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v8, v1, v8, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v0, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v5, v12, v14, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e32 v9, v9, v15, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v0, v10, v0, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v9, v1, v0, s[0:1] ; GFX9-NEXT: global_store_dwordx4 v13, v[2:5], s[12:13] ; 
GFX9-NEXT: global_store_dwordx4 v13, v[6:9], s[14:15] ; GFX9-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll index d053425..7cc5051 100644 --- a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll +++ b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll @@ -1483,7 +1483,6 @@ define void @flat_atomic_xchg_i64_noret_av(ptr %ptr) #0 { ; GFX90A-NEXT: buffer_invl2 ; GFX90A-NEXT: buffer_wbinvl1_vol ; GFX90A-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX90A-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_cbranch_execz .LBB20_2 ; GFX90A-NEXT: .LBB20_4: ; %atomicrmw.private diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll index 815b9f2..df9c97f 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll @@ -161654,177 +161654,175 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: s_clause 0x1f -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:244 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:240 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:236 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:232 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:228 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:224 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:220 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:216 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:212 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:208 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:204 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:200 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, 
v60, s32 offset:196 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:192 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:188 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:184 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:180 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:176 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:172 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:168 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:164 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:160 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:156 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:152 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:148 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:144 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:140 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:136 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:132 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:128 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:124 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:120 -; GFX11-TRUE16-NEXT: s_clause 0x1a -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:116 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:112 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:108 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:104 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:100 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:96 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:92 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:88 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:84 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:80 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, 
s32 offset:76 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:72 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:68 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:64 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:60 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:56 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:52 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:48 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:44 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:40 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:36 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:32 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:28 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:24 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:20 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:16 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:12 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:236 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:232 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:228 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:224 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:220 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:216 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:212 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:208 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:204 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:200 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:196 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:192 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:188 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:184 +; GFX11-TRUE16-NEXT: 
scratch_store_b32 off, v62, s32 offset:180 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:176 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:172 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:168 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:164 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:160 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:156 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:152 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:148 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:144 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:140 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:136 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:132 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:128 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:124 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:120 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:116 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:112 +; GFX11-TRUE16-NEXT: s_clause 0x18 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:108 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:104 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:100 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:96 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:92 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:88 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:84 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:80 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:76 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:72 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:68 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:64 +; GFX11-TRUE16-NEXT: 
scratch_store_b32 off, v124, s32 offset:60 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:56 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:52 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:48 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:44 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:40 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:36 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:32 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:28 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:24 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:20 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:16 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:12 ; GFX11-TRUE16-NEXT: s_clause 0x2 ; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32 offset:8 -; GFX11-TRUE16-NEXT: scratch_load_b32 v85, off, s32 offset:4 -; GFX11-TRUE16-NEXT: scratch_load_b32 v84, off, s32 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr180_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr143_lo16 +; GFX11-TRUE16-NEXT: scratch_load_b32 v99, off, s32 offset:4 +; GFX11-TRUE16-NEXT: scratch_load_b32 v98, off, s32 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr178_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr152_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr179_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr142_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr141_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr43_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr177_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr140_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr183_hi16 ; GFX11-TRUE16-NEXT: ; 
implicit-def: $vgpr139_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr127_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr62_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr125_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr40_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr138_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr179_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr137_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr56_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr47_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr42_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr123_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr121_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr110_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr79_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr111_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr74_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr60_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr109_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr107_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr111_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr106_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr95_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr89_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16 +; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr76_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr93_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr90_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr138_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr79_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr77_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr127_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr89_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr104_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr75_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr72_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr137_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr142_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr73_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr125_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr63_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr61_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr59_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr154_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr57_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr143_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: 
$vgpr152_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr46_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr44_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr141_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr47_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr124_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr124_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr122_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr109_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr106_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr110_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr94_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr104_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr94_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16 ; GFX11-TRUE16-NEXT: ; 
implicit-def: $vgpr92_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr90_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr88_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr77_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr74_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr72_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr88_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr62_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr59_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr164_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr76_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr57_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr163_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr73_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr63_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr46_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr44_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr166_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr60_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr43_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr165_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr56_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr176_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr167_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr42_lo16 ; GFX11-TRUE16-NEXT: ; 
implicit-def: $vgpr41_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr178_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr40_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr177_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr183_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr176_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr182_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr167_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr181_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr180_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16 @@ -161838,135 +161836,135 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB90_2 ; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[15:16] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[116:117], 24, v[11:12] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[132:133], 24, v[7:8] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[133:134], 24, v[5:6] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[23:24] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[19:20] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[84:85], 24, v[27:28] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[13:14] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[114:115], 24, v[11:12] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[9:10] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[130:131], 24, v[7:8] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 
v[144:145], 24, v[3:4] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[85:86], 24, v[25:26] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v47, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v13 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v79, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v89, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v91, 24, v10 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v10 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v111, 8, v7 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v6 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 8, v5 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v127, 24, v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 8, v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v3 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v141, 24, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v142, 8, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v143, 8, v1 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 24, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v137, 8, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v138, 8, v3 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 24, v2 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v2 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 
v152, 8, v1 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 24, v85 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v85 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 24, v99 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 8, v99 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v40, 8, v84 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 24, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v42, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v56, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v60, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v76, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v104, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v106, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 8, v17 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[112:113], 24, v[13:14] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[128:129], 24, v[9:10] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[134:135], 24, v[3:4] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[146:147], 24, v[1:2] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[84:85] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v98 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 24, v30 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 8, v30 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v43, 8, v29 +; GFX11-TRUE16-NEXT: 
v_lshrrev_b32_e32 v44, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v62, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v74, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v108, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 8, v17 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[131:132], 24, v[5:6] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[145:146], 24, v[1:2] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[98:99] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[29:30] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[27:28] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[96:97], 24, v[25:26] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[23:24] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[102:103], 24, v[21:22] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[115:116], 24, v[19:20] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[17:18] -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v180.h, v1.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v1.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.h, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v2.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v65.h, v2.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v43.h, v3.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v40.h, v3.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.h, v3.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v183.h, 
v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v67.h, v4.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v62.h, v5.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v56.h, v5.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.h, v5.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v47.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v42.h, v6.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v6.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v91.h, v7.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v79.h, v7.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v74.h, v8.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v60.h, v8.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.h, v8.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v111.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v98.h, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v89.h, v10.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.h, v10.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v138.h, v11.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v11.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v108.h, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v115.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v153.h, v13.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.h, v13.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v137.h, v14.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.h, v14.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v154.h, v15.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v144.h, v15.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v152.h, v16.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.h, v16.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v148.h, v17.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v106.h, v9.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.h, v9.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v76.h, v10.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.h, v10.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v127.h, v11.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.h, v11.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v104.h, v12.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.h, v12.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v142.h, v13.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 
v128.h, v13.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v125.h, v14.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v14.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v143.h, v15.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.h, v15.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v141.h, v16.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v16.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v135.h, v17.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v17.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v18.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v150.h, v19.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v148.h, v19.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v19.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v20.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v147.h, v20.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v20.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v160.h, v21.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v150.h, v21.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v21.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v151.h, v22.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v22.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v22.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v162.h, v23.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v160.h, v23.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v23.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v161.h, v24.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v151.h, v24.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v24.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v164.h, v25.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v162.h, v25.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v25.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v163.h, v26.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v161.h, v26.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v26.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v166.h, v27.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v164.h, v27.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v27.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v165.h, v28.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v163.h, v28.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v28.h -; 
GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v29.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v166.h, v29.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, v29.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v30.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v165.h, v30.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v30.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v84.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v54.h, v84.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v85.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v85.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v98.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v54.h, v98.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v99.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v99.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5 @@ -161982,148 +161980,153 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99 ; GFX11-TRUE16-NEXT: .LBB90_2: ; %Flow ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB90_4 ; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true ; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v17 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_add_f32 v34, 0x40c00000, v32 :: v_dual_lshlrev_b32 v31, 16, v18 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v31, 16, v18 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v20 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: 
v_dual_add_f32 v34, 0x40c00000, v32 :: v_dual_add_f32 v31, 0x40c00000, v31 ; GFX11-TRUE16-NEXT: v_bfe_u32 v38, v34, 16, 1 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v31, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v31 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_add3_u32 v32, v32, v31, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v31, v38, v34, 0x7fff +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v39, v32, v37, vcc_lo -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_dual_add_f32 v18, 0x40c00000, v18 :: v_dual_lshlrev_b32 v17, 16, v17 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_add3_u32 v35, v35, v18, 0x7fff -; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 0x40c00000, v17 :: v_dual_cndmask_b32 v32, v35, v36 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v33 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v34 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v35, v36, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v48, v17, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v17 ; GFX11-TRUE16-NEXT: 
v_cmp_u_f32_e32 vcc_lo, v17, v17 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v39.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_add3_u32 v37, v48, v17, 0x7fff -; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v20 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 24, v32 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v32 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 24, v32 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v32 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v148, v37, v49, vcc_lo -; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v33 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v34 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v135, v37, v49, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff0000, v19 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v31, v33, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v33, v20, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20 -; GFX11-TRUE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v34 :: v_dual_add_f32 v19, 0x40c00000, v19 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_add3_u32 v33, v33, v20, 0x7fff ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff +; GFX11-TRUE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v34 :: v_dual_add_f32 v19, 0x40c00000, v19 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_add3_u32 v33, v33, v20, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, 0x400000, v18 +; 
GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v135.h ; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v36, 16, 1 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v148.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v149, v33, v35, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v147, v33, v35, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v36 -; GFX11-TRUE16-NEXT: v_add3_u32 v20, v20, v36, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v19 -; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v22 +; GFX11-TRUE16-NEXT: v_add3_u32 v20, v20, v36, 0x7fff +; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v36 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 8, v31 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v34, v17, v34, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v19, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v34.l, v149.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 8, v31 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v34.l, v147.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v19, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v150, v17, v33, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v34 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v108, 8, v34 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v148, v17, v33, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36 -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v33, v20, v35 :: v_dual_and_b32 v20, 0xffff0000, v21 +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v33, v20, v35 :: v_dual_and_b32 v18, 0xffff0000, v22 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; 
GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v21 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 24, v34 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 8, v34 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18 +; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_add_f32 v22, 0x40c00000, v22 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 -; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_lshlrev_b32 v22, 16, v22 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v22 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v151, v19, v35 :: v_dual_lshlrev_b32 v22, 16, v24 -; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 -; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff -; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18 +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v149, v19, v35 :: v_dual_lshlrev_b32 v22, 16, v24 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 -; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v24 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v21 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v36, v17, v36, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v36.l, v151.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v36.l, v149.h ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v160, v17, v24 :: v_dual_lshlrev_b32 v21, 16, v23 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v23 +; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v150, v17, v24, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 -; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 +; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v23 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v36 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v35, v19, v35, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1 +; 
GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 -; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff -; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_add_f32 v20, 0x40c00000, v20 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v33.l, v148.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v36 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 24, v36 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v104, 8, v36 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v161, v19, v23, vcc_lo -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v33 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v151, v19, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21 -; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v26 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff +; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff +; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v26 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v17, v24, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 -; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff ; 
GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v20 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v38.l, v151.h ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v25 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v33.l, v150.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v38.l, v161.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v162, v17, v23, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v160, v17, v23 :: v_dual_lshlrev_b32 v21, 16, v25 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 @@ -162136,10 +162139,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v33 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v163, v19, v23, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v161, v19, v23 :: v_dual_lshlrev_b32 v22, 16, v28 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21 @@ -162152,10 +162153,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v27 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v35.l, v160.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v49.l, v163.h +; GFX11-TRUE16-NEXT: 
v_mov_b16_e64 v49.l, v161.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v35.l, v150.h ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v164, v17, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v162, v17, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 @@ -162168,10 +162169,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 24, v49 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v49 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v106, 8, v35 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v165, v19, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v49 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v62, 8, v49 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 8, v35 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v163, v19, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21 @@ -162184,10 +162185,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v29 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v51.l, v165.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 24, v38 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v38 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v166, v17, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v51.l, v163.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v74, 24, v38 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v38 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v164, v17, v23, vcc_lo ; GFX11-TRUE16-NEXT: 
v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 @@ -162200,14 +162201,14 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v85 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v56, 24, v51 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v51 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v167, v19, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v99 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v51 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v51 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v165, v19, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21 -; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v85 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v99 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v22, 0x40c00000, v22 :: v_dual_cndmask_b32 v53, v17, v24 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 @@ -162216,14 +162217,14 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v84 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v53.l, v167.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v176, v17, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v98 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v37.l, v160.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v53.l, v165.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v166, v17, v23, vcc_lo ; GFX11-TRUE16-NEXT: 
v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 -; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v84 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v98 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_cndmask_b32 v52, v19, v24 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 @@ -162232,10 +162233,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v21 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v37.l, v162.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 24, v53 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v42, 8, v53 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v177, v19, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 24, v53 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 8, v53 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v37 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v167, v19, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v2 @@ -162248,11 +162249,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v55.l, v177.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v37 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v55.l, v167.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v2 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v178, v17, v22, vcc_lo +; 
GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v176, v17, v22, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v1 @@ -162265,11 +162265,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_add_f32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v2, 0x7fff -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 24, v55 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v55 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 24, v55 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 8, v55 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v20, 16, 1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v179, v19, v21, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v177, v19, v21, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v4 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 @@ -162282,10 +162282,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v1, 0x7fff -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v65.l, v179.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v48.l, v162.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v65.l, v177.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v18, 16, 1 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v180, v17, v19, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v178, v17, v19, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; 
GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v3 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 @@ -162300,9 +162301,9 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v4, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v6 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v48.l, v164.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v141, 24, v65 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v183, v2, v19, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 24, v65 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v65 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v179, v2, v19, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v17, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v3 @@ -162312,13 +162313,13 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v17, 0x7fff ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v67.l, v183.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v67.l, v179.h ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v5 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v50.l, v166.h -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[96:97], 24, v[48:49] -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v43, v1, v18, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v50.l, v164.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 24, v67 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v40, v1, v18, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v4, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v6 @@ -162329,13 +162330,13 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; 
GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v4, 0x7fff ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.l, v43.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[84:85], 24, v[50:51] ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[50:51] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v127, 24, v67 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 8, v67 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v47, v2, v17, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[85:86], 24, v[48:49] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[37:38] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v137, 8, v67 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v42, v2, v17, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v8 @@ -162349,23 +162350,23 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v3 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.l, v47.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v62, v2, v8, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v50 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v56, v2, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_add3_u32 v3, v5, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v69 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v48 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v68, v1, v17, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v2, v4, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v7 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: 
v_lshlrev_b32_e32 v7, 16, v7 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v69 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.l, v56.h ; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v74, v3, v5, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v60, v3, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v7 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v10 @@ -162379,8 +162380,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v4, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.l, v74.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v52.l, v176.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.l, v60.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v52.l, v166.h ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v1, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 @@ -162388,21 +162389,20 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v91, v4, v8, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v79, v4, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v12 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v11 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.l, v91.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v89, v1, v4, vcc_lo -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v9 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.l, v79.h +; GFX11-TRUE16-NEXT: 
v_dual_cndmask_b32 v76, v1, v4 :: v_dual_lshlrev_b32 v1, 16, v9 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v12 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v99, v2, v7, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v2, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 @@ -162410,10 +162410,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v54.l, v178.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.l, v89.h -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[132:133], 24, v[82:83] -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v98, v2, v6, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.l, v76.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v54.l, v176.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[130:131], 24, v[82:83] +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v96, v2, v6, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 @@ -162421,29 +162421,29 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[54:55] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[52:53] ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v6, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v111, v2, v3, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v106, v2, v3, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; 
GFX11-TRUE16-NEXT: v_add3_u32 v3, v7, v4, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v14 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v98.l, v111.h -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v108, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.l, v106.h +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v104, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[128:129], 24, v[98:99] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[96:97] ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v115, v3, v7, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v113, v3, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v13 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v115.l, v108.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v99 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v114, v2, v3, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.l, v104.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[31:32] +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v112, v2, v3, vcc_lo ; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v4 :: v_dual_add_f32 v3, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1 @@ -162452,8 +162452,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v64.l, v180.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 
v138, v4, v5, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v64.l, v178.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v127, v4, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v2, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 @@ -162461,19 +162461,19 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v131, v4, v5, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v129, v4, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 ; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v16 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v15 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v137, v6, v7, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v125, v6, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v15 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v114.l, v138.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v130, v3, v4, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.l, v40.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v128, v3, v4, vcc_lo ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v16 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v6, v2, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2 @@ -162481,11 +162481,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.l, v62.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.l, v137.h +; 
GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.l, v125.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.l, v127.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v153, v4, v6, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v142, v4, v6, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3 @@ -162494,83 +162494,82 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8 ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.l, v153.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v152, v2, v9, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.l, v142.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v141, v2, v9, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[116:117], 24, v[114:115] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[133:134], 24, v[68:69] -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v154, v7, v11, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.l, v42.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[114:115], 24, v[112:113] +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v143, v7, v11, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[33:34] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[112:113], 24, v[130:131] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[134:135], 24, v[66:67] -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v145, v4, v8, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[144:145], 24, v[66:67] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[131:132], 24, v[68:69] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[145:146], 24, v[64:65] +; 
GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v134, v4, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.l, v152.h -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[146:147], 24, v[64:65] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[31:32] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v131 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v144, v2, v3, vcc_lo -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v144.l, v154.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v145 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v145 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 8, v131 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v130 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[144:145] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[37:38] +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.l, v141.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[115:116], 24, v[33:34] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 24, v129 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 8, v129 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v133, v2, v3, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.l, v143.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 24, v134 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v47, 8, v134 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v128 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v113 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[133:134] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[128:129] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[102:103], 24, v[35:36] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v144 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v115 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v115 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v79, 8, v114 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v99 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v98 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v83 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 8, v83 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v82 -; 
GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 8, v68 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v66 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v142, 8, v65 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v143, 8, v64 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v40, 8, v54 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 8, v52 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v60, 8, v50 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v76, 8, v48 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v133 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 8, v113 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v89, 8, v112 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v91, 24, v97 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v97 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v96 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 24, v83 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 8, v83 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v111, 8, v82 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v69 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v69 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v68 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v138, 8, v66 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v152, 8, v64 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v54 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v43, 8, v52 ; GFX11-TRUE16-NEXT: .LBB90_4: ; %end ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v180.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.h, 8, v143.l +; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v178.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.h, 8, v152.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v64.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v146.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v145.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v65.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v1.l, v1.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v141.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v139.l ; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v2.l, 
v2.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v179.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v142.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v177.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v140.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v66.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v134.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v144.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v5, v1 ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v67.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v2.l, v2.h ; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v3.l, v3.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v43.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v140.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v127.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v40.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v138.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v136.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v68.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v5, v2 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v133.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v131.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v3.l, v3.h ; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.l, v4.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v183.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v139.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v179.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v137.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v69.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v121.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v5, v3 @@ -162578,89 +162577,89 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v4.l, v4.h ; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v6.l, v6.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v62.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v125.l -; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v132.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v56.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v126.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v130.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v83.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v105.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v107.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v6.l, v6.h ; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v7.l, v7.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v47.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v42.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v123.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v98.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v128.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v96.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v117.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v5, v6 -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v99.h +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v97.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v7.l, v7.h ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v8.l, v8.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v91.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v110.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v90.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v114.h +; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v79.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v111.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v91.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v112.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v5, v7 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v116.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v114.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v8.l, v8.h ; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.l, v9.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v74.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v107.l -; 
GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v115.h +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v60.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v109.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v113.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v75.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v5, v8 -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v130.h +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v128.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v9.l, v9.h ; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v111.h +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v106.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v95.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v112.l -; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v131.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v101.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v129.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v5, v9 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v59.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v61.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v10.l, v10.h ; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v89.h +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v76.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v93.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v144.h +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v133.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v100.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v5, v10 -; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v145.h +; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v134.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v11.l, v11.h ; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v12.l, v12.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v138.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v79.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v44.l +; GFX11-TRUE16-NEXT: v_and_b16 
v12.l, 0xff, v127.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v89.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v45.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v31.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v5, v11 ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v118.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v12.l, v12.h ; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v13.l, v13.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v108.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v77.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v104.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v78.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v32.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v124.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v120.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v5, v12 ; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v13.l, v13.h ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.l, v14.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v153.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v72.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v142.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v73.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v115.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v34.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v5, v13 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v109.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v105.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v14.l, v14.h ; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v15.l, v15.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v137.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v61.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v125.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v63.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v35.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v102.l 
; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v5, v14 @@ -162668,71 +162667,71 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v15.l, v15.h ; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.l, v16.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v154.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v57.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v94.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v143.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v58.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v90.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v37.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v5, v15 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v101.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v86.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v16.l, v16.h ; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v17.l, v17.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v152.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v46.l +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v141.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v47.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v38.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v78.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v74.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v5, v16 ; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v48.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v17.l, v17.h ; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v18.l, v18.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v148.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v136.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v135.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v124.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v85.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v49.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v5, v17 -; GFX11-TRUE16-NEXT: 
v_lshlrev_b16 v28.h, 8, v63.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v59.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v18.l, v18.h ; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v19.l, v19.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v5.h ; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v126.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v122.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v50.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v86.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v84.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v5, v18 ; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v51.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v19.l, v19.h ; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v20.l, v20.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v150.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v122.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v56.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v148.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v110.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v44.l ; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v52.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v5, v19 ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v80.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v20.l, v20.h ; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v21.l, v21.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v149.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v120.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v147.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v108.l ; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v53.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v41.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v183.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v5, v20 ; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v54.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v21.l, v21.h ; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v22.l, v22.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v5.h -; 
GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v160.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v106.l +; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v150.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v94.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v70.l ; GFX11-TRUE16-NEXT: v_and_b16 v34.l, 0xff, v55.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v5, v21 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v181.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v180.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v22.l, v22.h ; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v23.l, v23.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v151.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v104.l +; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v149.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v92.l ; GFX11-TRUE16-NEXT: s_clause 0x1 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[6:9], off offset:16 @@ -162740,71 +162739,71 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v23.l, v23.h ; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v24.l, v24.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v162.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v92.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v160.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v88.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v5, v23 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v24.l, v24.h ; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v25.l, v25.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v161.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v88.l +; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v151.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v77.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | 
instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v5, v24 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v25.l, v25.h ; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v26.l, v26.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v164.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v76.l +; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v162.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v72.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v5, v25 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v26.l, v26.h ; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v27.l, v27.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v163.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v73.l +; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v161.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v62.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v5, v26 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v27.l, v27.h ; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v28.l, v28.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v166.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v60.l +; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v164.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v57.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v5, v27 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v28.l, v28.h ; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v29.l, v29.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v165.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v58.l +; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v163.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v46.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; 
GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v5, v28 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v29.l, v29.h ; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v30.l, v30.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v176.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v45.l +; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v166.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v43.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v5, v29 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v30.l, v30.h ; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v31.l, v31.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v167.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v42.l +; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v165.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v41.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v5, v30 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v31.l, v31.h ; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v32.l, v32.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v178.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v40.l +; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v176.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v182.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v5, v31 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v32.l, v32.h ; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v33.l, v33.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v177.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v182.l +; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v167.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v181.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: 
v_or_b32_e32 v32, v5, v32 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v33.l, v33.h @@ -162820,66 +162819,64 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[26:29], off offset:96 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[30:33], off offset:112 ; GFX11-TRUE16-NEXT: s_clause 0x1f -; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:12 -; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:16 -; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:20 -; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:24 -; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:28 -; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:32 -; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:36 -; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:40 -; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:44 -; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:48 -; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:52 -; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:56 -; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:60 -; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:64 -; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:68 -; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:72 -; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:76 -; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:80 -; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:84 -; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:88 -; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:92 -; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:96 -; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:100 -; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:104 -; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:108 -; GFX11-TRUE16-NEXT: scratch_load_b32 v105, 
off, s32 offset:112 -; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:116 -; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:120 -; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:124 -; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:128 -; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:132 -; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:136 -; GFX11-TRUE16-NEXT: s_clause 0x1a -; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:140 -; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:144 -; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:148 -; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:152 -; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:156 -; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:160 -; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:164 -; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:168 -; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:172 -; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:176 -; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:180 -; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:184 -; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:188 -; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:192 -; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:196 -; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:200 -; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:204 -; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:208 -; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:212 -; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:216 -; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:220 -; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:224 -; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:228 -; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:232 -; GFX11-TRUE16-NEXT: 
scratch_load_b32 v42, off, s32 offset:236 -; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:240 -; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:244 +; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:12 +; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:16 +; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:20 +; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:24 +; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:28 +; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:32 +; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:36 +; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:40 +; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:44 +; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:48 +; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:52 +; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:56 +; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:60 +; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:64 +; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:68 +; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:72 +; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:76 +; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:80 +; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:84 +; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:88 +; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:92 +; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:96 +; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:100 +; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:104 +; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:108 +; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:112 +; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:116 +; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:120 +; GFX11-TRUE16-NEXT: 
scratch_load_b32 v92, off, s32 offset:124 +; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:128 +; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:132 +; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:136 +; GFX11-TRUE16-NEXT: s_clause 0x18 +; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:140 +; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:144 +; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:148 +; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:152 +; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:156 +; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:160 +; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:164 +; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:168 +; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:172 +; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:176 +; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:180 +; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:184 +; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:188 +; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:192 +; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:196 +; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:200 +; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:204 +; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:208 +; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:212 +; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:216 +; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:220 +; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:224 +; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:228 +; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:232 +; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:236 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -186724,55 +186721,55 @@ define <128 x i8> 
@bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16 ; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16 @@ -186798,24 +186795,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[9:10] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[3:4] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[25:26] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, 
v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 @@ -186827,24 +186824,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 ; 
GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 +; GFX11-TRUE16-NEXT: 
v_lshrrev_b32_e32 v150, 8, v18 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[1:2] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24] @@ -186906,24 +186903,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[21:22] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[65:66], 24, v[17:18] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 
v131, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 @@ -186935,24 +186932,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 
24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 8, v17 ; GFX11-TRUE16-NEXT: .LBB94_4: ; %end ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 @@ -186990,7 +186987,7 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v68.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h @@ -186998,15 +186995,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v149.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v147.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v149.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h ; GFX11-TRUE16-NEXT: 
v_or_b16 v6.h, v6.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v67.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h @@ -187014,15 +187011,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v135.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v133.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v135.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v133.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h @@ -187030,15 +187027,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v119.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v129.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h ; GFX11-TRUE16-NEXT: 
v_or_b16 v10.h, v10.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v119.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h @@ -187046,15 +187043,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v115.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v113.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v115.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v113.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h @@ -187062,15 +187059,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v101.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v99.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v101.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h 
; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v99.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h @@ -187078,8 +187075,8 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v87.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v85.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v87.l ; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l ; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15 @@ -187094,15 +187091,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v150.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v150.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v148.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v148.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h @@ -187110,15 +187107,15 @@ define 
<128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v134.l ; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l ; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v134.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h @@ -187126,15 +187123,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v130.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v130.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v128.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v128.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h @@ 
-187142,15 +187139,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v116.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v116.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v114.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v114.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h @@ -187158,15 +187155,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v102.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v102.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v100.l ; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l ; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v100.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l ; GFX11-TRUE16-NEXT: v_and_b16 
v28.h, 0xff, v28.h @@ -187174,15 +187171,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v86.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v86.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v85.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l ; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l ; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h @@ -209426,55 +209423,55 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 ; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 +; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16 @@ -209500,24 +209497,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[9:10] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[3:4] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[25:26] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 +; GFX11-TRUE16-NEXT: 
v_lshrrev_b32_e32 v101, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 @@ -209529,24 +209526,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20 -; GFX11-TRUE16-NEXT: 
v_lshrrev_b32_e32 v148, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[1:2] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24] @@ -209608,24 +209605,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[21:22] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[65:66], 24, v[17:18] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 
v117, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 @@ -209637,24 +209634,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28 
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 8, v17 ; GFX11-TRUE16-NEXT: .LBB98_4: ; %end ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 @@ -209692,7 +209689,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; 
GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v68.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h @@ -209700,15 +209697,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v149.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v147.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v149.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v67.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h @@ -209716,15 +209713,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v135.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v133.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v135.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7 ; 
GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v133.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h @@ -209732,15 +209729,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v119.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v129.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v119.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h @@ -209748,15 +209745,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v115.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v113.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v115.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 
v39, v11 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v113.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h @@ -209764,15 +209761,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v101.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v99.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v101.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v99.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h @@ -209780,8 +209777,8 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v87.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v85.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v87.l ; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l ; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h ; GFX11-TRUE16-NEXT: 
v_or_b32_e32 v15, v39, v15 @@ -209796,15 +209793,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v150.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v150.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v148.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v148.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h @@ -209812,15 +209809,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v134.l ; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l ; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v134.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l ; 
GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h @@ -209828,15 +209825,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v130.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v130.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v128.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v128.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h @@ -209844,15 +209841,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v116.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v116.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v114.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v114.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l ; GFX11-TRUE16-NEXT: v_and_b16 
v26.l, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h @@ -209860,15 +209857,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v102.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v102.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v100.l ; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l ; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v100.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h @@ -209876,15 +209873,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v86.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v86.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v85.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l ; 
GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l ; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll index e33493c..d3fbba3 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll @@ -85072,13 +85072,13 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 @@ -85086,20 +85086,20 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 ; GFX11-TRUE16-NEXT: ; 
implicit-def: $vgpr32_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_lo16 @@ -85119,18 +85119,18 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v16 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 8, v16 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v14 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v13 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v12 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v11 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 24, v10 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v10 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v9 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v8 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v7 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v6 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v6 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v5 @@ -85159,19 +85159,19 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v7.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 
v66.h, v8.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v8.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.h, v9.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v9.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v9.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v67.h, v10.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v10.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.h, v11.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v11.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v12.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v12.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v12.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.h, v13.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v13.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.h, v14.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v87.h, v14.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v14.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v15.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.h, v15.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v15.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v103.h, v16.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v16.h @@ -85345,29 +85345,29 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v68.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v67.h ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v12 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v67.h ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[21:22] ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v25, v2, v6, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[21:22] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[50:51], 24, v[19:20] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[17:18] ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v6, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v83, v2, v3, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v2, v3, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v8 :: v_dual_lshlrev_b32 v5, 16, v14 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v83.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v24 -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v82, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v82.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[17:18] +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v80, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 @@ -85384,82 +85384,81 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v23 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v80.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v24 ; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v27, v2, v3 :: v_dual_add_f32 v2, 0x40c00000, v4 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1 ; GFX11-TRUE16-NEXT: 
v_bfe_u32 v8, v3, 16, 1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v82.h +; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v28 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v4, v5, vcc_lo -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v2, 0x7fff +; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 ; GFX11-TRUE16-NEXT: v_add3_u32 v6, v8, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v97.h +; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v33, v4, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13 -; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 +; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v15 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v96, v6, v7, vcc_lo -; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v97.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v87, v6, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v24 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[34:35], 24, v[27:28] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[25:26] -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v96.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v28 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v87.h ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v3, v4, vcc_lo ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v16 ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 24, v33 -; 
GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v33 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[25:26] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 24, v33 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v33 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v6, v2, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v28 -; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v23 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v112, v4, v6 :: v_dual_add_f32 v1, 0x40c00000, v5 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8 -; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v8 :: v_dual_lshlrev_b32 v5, 16, v15 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v112.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v103, v2, v9, vcc_lo -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 -; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v15 -; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v32 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v27 -; GFX11-TRUE16-NEXT: 
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff +; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v114, v7, v11, vcc_lo +; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff +; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v103, v2, v9, vcc_lo +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v113, v7, v11, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v4, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v103.h ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v37, v2, v3, vcc_lo -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v114.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v113.h ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v38 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 8, v38 @@ -85524,7 +85523,7 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v7.l, v7.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v31.h ; GFX11-TRUE16-NEXT: 
v_and_b16 v7.l, 0xff, v68.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v113.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v114.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v98.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v27.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v31, v6 @@ -85541,12 +85540,12 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v8.l, v8.h ; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.l, v9.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v31.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v83.h +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v82.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v100.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v30.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v33.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v31, v8 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v80.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v81.l ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v9.l, v9.h ; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v31.h @@ -85560,14 +85559,14 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v31.h ; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v97.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v87.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v96.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v69.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v31, v10 ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v11.l, v11.h ; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v12.l, v12.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v31.h -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v82.h +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v80.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v86.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; 
GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v31, v11 @@ -85581,14 +85580,14 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v13.l, v13.h ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.l, v14.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v31.h -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v96.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v81.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v87.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v83.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v31, v13 ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v14.l, v14.h ; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v15.l, v15.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v31.h -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v114.h +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v113.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v71.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v31, v14 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll index 67c9bfe..ecc715c 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll @@ -11261,8 +11261,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; VI-NEXT: s_lshr_b32 s5, s17, 24 ; VI-NEXT: s_lshr_b32 s8, s17, 16 -; VI-NEXT: s_lshr_b32 s9, s17, 8 -; VI-NEXT: s_lshr_b32 s10, s16, 16 +; VI-NEXT: s_lshr_b32 s10, s17, 8 +; VI-NEXT: s_lshr_b32 s9, s16, 16 ; VI-NEXT: s_lshr_b32 s11, s16, 8 ; VI-NEXT: s_cbranch_execnz .LBB85_4 ; VI-NEXT: .LBB85_2: ; %cmp.true @@ -11277,9 +11277,9 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; VI-NEXT: s_branch .LBB85_5 ; VI-NEXT: .LBB85_3: ; VI-NEXT: ; implicit-def: $sgpr11 -; VI-NEXT: ; 
implicit-def: $sgpr10 -; VI-NEXT: ; implicit-def: $sgpr4 ; VI-NEXT: ; implicit-def: $sgpr9 +; VI-NEXT: ; implicit-def: $sgpr4 +; VI-NEXT: ; implicit-def: $sgpr10 ; VI-NEXT: ; implicit-def: $sgpr8 ; VI-NEXT: ; implicit-def: $sgpr5 ; VI-NEXT: s_branch .LBB85_2 @@ -11287,8 +11287,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; VI-NEXT: v_mov_b32_e32 v8, s16 ; VI-NEXT: v_mov_b32_e32 v9, s17 ; VI-NEXT: v_mov_b32_e32 v1, s11 -; VI-NEXT: v_mov_b32_e32 v2, s10 -; VI-NEXT: v_mov_b32_e32 v5, s9 +; VI-NEXT: v_mov_b32_e32 v2, s9 +; VI-NEXT: v_mov_b32_e32 v5, s10 ; VI-NEXT: v_mov_b32_e32 v6, s8 ; VI-NEXT: v_mov_b32_e32 v7, s5 ; VI-NEXT: v_mov_b32_e32 v3, s4 @@ -11306,8 +11306,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; GFX9-NEXT: s_lshr_b32 s5, s17, 24 ; GFX9-NEXT: s_lshr_b32 s8, s17, 16 -; GFX9-NEXT: s_lshr_b32 s9, s17, 8 -; GFX9-NEXT: s_lshr_b32 s10, s16, 16 +; GFX9-NEXT: s_lshr_b32 s10, s17, 8 +; GFX9-NEXT: s_lshr_b32 s9, s16, 16 ; GFX9-NEXT: s_lshr_b32 s11, s16, 8 ; GFX9-NEXT: s_cbranch_execnz .LBB85_4 ; GFX9-NEXT: .LBB85_2: ; %cmp.true @@ -11322,9 +11322,9 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX9-NEXT: s_branch .LBB85_5 ; GFX9-NEXT: .LBB85_3: ; GFX9-NEXT: ; implicit-def: $sgpr11 -; GFX9-NEXT: ; implicit-def: $sgpr10 -; GFX9-NEXT: ; implicit-def: $sgpr4 ; GFX9-NEXT: ; implicit-def: $sgpr9 +; GFX9-NEXT: ; implicit-def: $sgpr4 +; GFX9-NEXT: ; implicit-def: $sgpr10 ; GFX9-NEXT: ; implicit-def: $sgpr8 ; GFX9-NEXT: ; implicit-def: $sgpr5 ; GFX9-NEXT: s_branch .LBB85_2 @@ -11332,8 +11332,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX9-NEXT: v_mov_b32_e32 v8, s16 ; GFX9-NEXT: v_mov_b32_e32 v9, s17 ; GFX9-NEXT: v_mov_b32_e32 v1, s11 -; GFX9-NEXT: v_mov_b32_e32 v2, s10 -; GFX9-NEXT: v_mov_b32_e32 v5, s9 +; GFX9-NEXT: v_mov_b32_e32 v2, s9 +; GFX9-NEXT: 
v_mov_b32_e32 v5, s10 ; GFX9-NEXT: v_mov_b32_e32 v6, s8 ; GFX9-NEXT: v_mov_b32_e32 v7, s5 ; GFX9-NEXT: v_mov_b32_e32 v3, s4 @@ -11352,8 +11352,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24 ; GFX11-NEXT: s_lshr_b32 s3, s1, 24 ; GFX11-NEXT: s_lshr_b32 s5, s1, 16 -; GFX11-NEXT: s_lshr_b32 s6, s1, 8 -; GFX11-NEXT: s_lshr_b32 s7, s0, 16 +; GFX11-NEXT: s_lshr_b32 s7, s1, 8 +; GFX11-NEXT: s_lshr_b32 s6, s0, 16 ; GFX11-NEXT: s_lshr_b32 s8, s0, 8 ; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4 ; GFX11-NEXT: s_cbranch_vccnz .LBB85_4 @@ -11370,16 +11370,16 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX11-NEXT: s_branch .LBB85_5 ; GFX11-NEXT: .LBB85_3: ; GFX11-NEXT: ; implicit-def: $sgpr8 -; GFX11-NEXT: ; implicit-def: $sgpr7 -; GFX11-NEXT: ; implicit-def: $sgpr2 ; GFX11-NEXT: ; implicit-def: $sgpr6 +; GFX11-NEXT: ; implicit-def: $sgpr2 +; GFX11-NEXT: ; implicit-def: $sgpr7 ; GFX11-NEXT: ; implicit-def: $sgpr5 ; GFX11-NEXT: ; implicit-def: $sgpr3 ; GFX11-NEXT: s_branch .LBB85_2 ; GFX11-NEXT: .LBB85_4: ; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1 -; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7 -; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5 +; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6 +; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5 ; GFX11-NEXT: v_mov_b32_e32 v7, s3 ; GFX11-NEXT: v_mov_b32_e32 v3, s2 ; GFX11-NEXT: .LBB85_5: ; %end @@ -13517,8 +13517,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; GFX9-NEXT: s_lshr_b32 s5, s17, 24 ; GFX9-NEXT: s_lshr_b32 s8, s17, 16 -; GFX9-NEXT: s_lshr_b32 s9, s17, 8 -; GFX9-NEXT: s_lshr_b32 s10, s16, 16 +; GFX9-NEXT: s_lshr_b32 s10, s17, 8 +; GFX9-NEXT: s_lshr_b32 s9, s16, 16 ; GFX9-NEXT: s_lshr_b32 s11, s16, 8 ; GFX9-NEXT: s_cbranch_execnz .LBB97_4 ; 
GFX9-NEXT: .LBB97_2: ; %cmp.true @@ -13533,9 +13533,9 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX9-NEXT: s_branch .LBB97_5 ; GFX9-NEXT: .LBB97_3: ; GFX9-NEXT: ; implicit-def: $sgpr11 -; GFX9-NEXT: ; implicit-def: $sgpr10 -; GFX9-NEXT: ; implicit-def: $sgpr4 ; GFX9-NEXT: ; implicit-def: $sgpr9 +; GFX9-NEXT: ; implicit-def: $sgpr4 +; GFX9-NEXT: ; implicit-def: $sgpr10 ; GFX9-NEXT: ; implicit-def: $sgpr8 ; GFX9-NEXT: ; implicit-def: $sgpr5 ; GFX9-NEXT: s_branch .LBB97_2 @@ -13543,8 +13543,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX9-NEXT: v_mov_b32_e32 v8, s16 ; GFX9-NEXT: v_mov_b32_e32 v9, s17 ; GFX9-NEXT: v_mov_b32_e32 v1, s11 -; GFX9-NEXT: v_mov_b32_e32 v2, s10 -; GFX9-NEXT: v_mov_b32_e32 v5, s9 +; GFX9-NEXT: v_mov_b32_e32 v2, s9 +; GFX9-NEXT: v_mov_b32_e32 v5, s10 ; GFX9-NEXT: v_mov_b32_e32 v6, s8 ; GFX9-NEXT: v_mov_b32_e32 v7, s5 ; GFX9-NEXT: v_mov_b32_e32 v3, s4 @@ -13563,8 +13563,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24 ; GFX11-NEXT: s_lshr_b32 s3, s1, 24 ; GFX11-NEXT: s_lshr_b32 s5, s1, 16 -; GFX11-NEXT: s_lshr_b32 s6, s1, 8 -; GFX11-NEXT: s_lshr_b32 s7, s0, 16 +; GFX11-NEXT: s_lshr_b32 s7, s1, 8 +; GFX11-NEXT: s_lshr_b32 s6, s0, 16 ; GFX11-NEXT: s_lshr_b32 s8, s0, 8 ; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4 ; GFX11-NEXT: s_cbranch_vccnz .LBB97_4 @@ -13581,16 +13581,16 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX11-NEXT: s_branch .LBB97_5 ; GFX11-NEXT: .LBB97_3: ; GFX11-NEXT: ; implicit-def: $sgpr8 -; GFX11-NEXT: ; implicit-def: $sgpr7 -; GFX11-NEXT: ; implicit-def: $sgpr2 ; GFX11-NEXT: ; implicit-def: $sgpr6 +; GFX11-NEXT: ; implicit-def: $sgpr2 +; GFX11-NEXT: ; implicit-def: $sgpr7 ; GFX11-NEXT: ; implicit-def: $sgpr5 ; GFX11-NEXT: ; implicit-def: $sgpr3 ; GFX11-NEXT: s_branch .LBB97_2 ; GFX11-NEXT: .LBB97_4: ; GFX11-NEXT: 
v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1 -; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7 -; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5 +; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6 +; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5 ; GFX11-NEXT: v_mov_b32_e32 v7, s3 ; GFX11-NEXT: v_mov_b32_e32 v3, s2 ; GFX11-NEXT: .LBB97_5: ; %end @@ -15345,8 +15345,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; GFX9-NEXT: s_lshr_b32 s5, s17, 24 ; GFX9-NEXT: s_lshr_b32 s8, s17, 16 -; GFX9-NEXT: s_lshr_b32 s9, s17, 8 -; GFX9-NEXT: s_lshr_b32 s10, s16, 16 +; GFX9-NEXT: s_lshr_b32 s10, s17, 8 +; GFX9-NEXT: s_lshr_b32 s9, s16, 16 ; GFX9-NEXT: s_lshr_b32 s11, s16, 8 ; GFX9-NEXT: s_cbranch_execnz .LBB105_4 ; GFX9-NEXT: .LBB105_2: ; %cmp.true @@ -15362,9 +15362,9 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX9-NEXT: s_branch .LBB105_5 ; GFX9-NEXT: .LBB105_3: ; GFX9-NEXT: ; implicit-def: $sgpr11 -; GFX9-NEXT: ; implicit-def: $sgpr10 -; GFX9-NEXT: ; implicit-def: $sgpr4 ; GFX9-NEXT: ; implicit-def: $sgpr9 +; GFX9-NEXT: ; implicit-def: $sgpr4 +; GFX9-NEXT: ; implicit-def: $sgpr10 ; GFX9-NEXT: ; implicit-def: $sgpr8 ; GFX9-NEXT: ; implicit-def: $sgpr5 ; GFX9-NEXT: s_branch .LBB105_2 @@ -15372,8 +15372,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX9-NEXT: v_mov_b32_e32 v8, s16 ; GFX9-NEXT: v_mov_b32_e32 v9, s17 ; GFX9-NEXT: v_mov_b32_e32 v1, s11 -; GFX9-NEXT: v_mov_b32_e32 v2, s10 -; GFX9-NEXT: v_mov_b32_e32 v5, s9 +; GFX9-NEXT: v_mov_b32_e32 v2, s9 +; GFX9-NEXT: v_mov_b32_e32 v5, s10 ; GFX9-NEXT: v_mov_b32_e32 v6, s8 ; GFX9-NEXT: v_mov_b32_e32 v7, s5 ; GFX9-NEXT: v_mov_b32_e32 v3, s4 @@ -15392,8 +15392,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24 ; GFX11-NEXT: s_lshr_b32 s3, 
s1, 24 ; GFX11-NEXT: s_lshr_b32 s5, s1, 16 -; GFX11-NEXT: s_lshr_b32 s6, s1, 8 -; GFX11-NEXT: s_lshr_b32 s7, s0, 16 +; GFX11-NEXT: s_lshr_b32 s7, s1, 8 +; GFX11-NEXT: s_lshr_b32 s6, s0, 16 ; GFX11-NEXT: s_lshr_b32 s8, s0, 8 ; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4 ; GFX11-NEXT: s_cbranch_vccnz .LBB105_4 @@ -15410,16 +15410,16 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX11-NEXT: s_branch .LBB105_5 ; GFX11-NEXT: .LBB105_3: ; GFX11-NEXT: ; implicit-def: $sgpr8 -; GFX11-NEXT: ; implicit-def: $sgpr7 -; GFX11-NEXT: ; implicit-def: $sgpr2 ; GFX11-NEXT: ; implicit-def: $sgpr6 +; GFX11-NEXT: ; implicit-def: $sgpr2 +; GFX11-NEXT: ; implicit-def: $sgpr7 ; GFX11-NEXT: ; implicit-def: $sgpr5 ; GFX11-NEXT: ; implicit-def: $sgpr3 ; GFX11-NEXT: s_branch .LBB105_2 ; GFX11-NEXT: .LBB105_4: ; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1 -; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7 -; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5 +; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6 +; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5 ; GFX11-NEXT: v_mov_b32_e32 v7, s3 ; GFX11-NEXT: v_mov_b32_e32 v3, s2 ; GFX11-NEXT: .LBB105_5: ; %end @@ -16493,8 +16493,8 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32 ; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; VI-NEXT: s_lshr_b32 s8, s17, 24 ; VI-NEXT: s_lshr_b32 s5, s17, 16 -; VI-NEXT: s_lshr_b32 s9, s17, 8 -; VI-NEXT: s_lshr_b32 s10, s16, 16 +; VI-NEXT: s_lshr_b32 s10, s17, 8 +; VI-NEXT: s_lshr_b32 s9, s16, 16 ; VI-NEXT: s_lshr_b32 s11, s16, 8 ; VI-NEXT: s_cbranch_execnz .LBB109_4 ; VI-NEXT: .LBB109_2: ; %cmp.true @@ -16546,16 +16546,16 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32 ; VI-NEXT: s_setpc_b64 s[30:31] ; VI-NEXT: .LBB109_3: ; VI-NEXT: ; implicit-def: $sgpr11 -; VI-NEXT: ; implicit-def: $sgpr10 -; VI-NEXT: ; implicit-def: $sgpr4 ; VI-NEXT: ; 
implicit-def: $sgpr9 +; VI-NEXT: ; implicit-def: $sgpr4 +; VI-NEXT: ; implicit-def: $sgpr10 ; VI-NEXT: ; implicit-def: $sgpr5 ; VI-NEXT: ; implicit-def: $sgpr8 ; VI-NEXT: s_branch .LBB109_2 ; VI-NEXT: .LBB109_4: ; VI-NEXT: v_mov_b32_e32 v1, s11 -; VI-NEXT: v_mov_b32_e32 v2, s10 -; VI-NEXT: v_mov_b32_e32 v5, s9 +; VI-NEXT: v_mov_b32_e32 v2, s9 +; VI-NEXT: v_mov_b32_e32 v5, s10 ; VI-NEXT: v_mov_b32_e32 v7, s8 ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_mov_b32_e32 v0, s16 diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll index 97df2a0..258bc295 100644 --- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll +++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll @@ -5548,7 +5548,6 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX7LESS: ; %bb.0: ; %entry ; GFX7LESS-NEXT: s_mov_b64 s[6:7], exec ; GFX7LESS-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX7LESS-NEXT: s_mov_b32 s4, 0 ; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s6, 0 ; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v4, s7, v0 ; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 @@ -5557,33 +5556,32 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX7LESS-NEXT: s_cbranch_execz .LBB9_4 ; GFX7LESS-NEXT: ; %bb.1: ; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0) -; GFX7LESS-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x0 -; GFX7LESS-NEXT: s_bcnt1_i32_b64 s5, s[6:7] +; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0 +; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[6:7] ; GFX7LESS-NEXT: s_mov_b64 s[10:11], 0 -; GFX7LESS-NEXT: v_mov_b32_e32 v5, s4 ; GFX7LESS-NEXT: s_mov_b32 s7, 0xf000 -; GFX7LESS-NEXT: s_mul_i32 s12, s5, 5 +; GFX7LESS-NEXT: s_mul_i32 s12, s6, 5 ; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0) -; GFX7LESS-NEXT: v_mov_b32_e32 v0, s14 -; GFX7LESS-NEXT: v_mov_b32_e32 v1, s15 +; GFX7LESS-NEXT: v_mov_b32_e32 v0, s4 +; GFX7LESS-NEXT: v_mov_b32_e32 v1, s5 ; 
GFX7LESS-NEXT: s_mov_b32 s6, -1 ; GFX7LESS-NEXT: s_mov_b32 s4, s2 ; GFX7LESS-NEXT: s_mov_b32 s5, s3 ; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start ; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX7LESS-NEXT: v_mov_b32_e32 v9, v1 -; GFX7LESS-NEXT: v_mov_b32_e32 v8, v0 -; GFX7LESS-NEXT: v_subrev_i32_e32 v6, vcc, s12, v8 -; GFX7LESS-NEXT: v_subb_u32_e32 v7, vcc, v9, v5, vcc +; GFX7LESS-NEXT: v_mov_b32_e32 v8, v1 +; GFX7LESS-NEXT: v_mov_b32_e32 v7, v0 +; GFX7LESS-NEXT: v_subrev_i32_e32 v5, vcc, s12, v7 +; GFX7LESS-NEXT: v_subbrev_u32_e32 v6, vcc, 0, v8, vcc ; GFX7LESS-NEXT: s_waitcnt expcnt(0) -; GFX7LESS-NEXT: v_mov_b32_e32 v0, v6 -; GFX7LESS-NEXT: v_mov_b32_e32 v1, v7 -; GFX7LESS-NEXT: v_mov_b32_e32 v2, v8 -; GFX7LESS-NEXT: v_mov_b32_e32 v3, v9 +; GFX7LESS-NEXT: v_mov_b32_e32 v0, v5 +; GFX7LESS-NEXT: v_mov_b32_e32 v1, v6 +; GFX7LESS-NEXT: v_mov_b32_e32 v2, v7 +; GFX7LESS-NEXT: v_mov_b32_e32 v3, v8 ; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; GFX7LESS-NEXT: s_waitcnt vmcnt(0) ; GFX7LESS-NEXT: buffer_wbinvl1 -; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8] ; GFX7LESS-NEXT: s_or_b64 s[10:11], vcc, s[10:11] ; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2 @@ -5611,39 +5609,37 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0 ; GFX8-NEXT: v_mbcnt_hi_u32_b32 v4, s7, v0 -; GFX8-NEXT: s_mov_b32 s4, 0 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 ; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX8-NEXT: s_and_saveexec_b64 s[8:9], vcc ; GFX8-NEXT: s_cbranch_execz .LBB9_4 ; GFX8-NEXT: ; %bb.1: ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x0 -; GFX8-NEXT: s_bcnt1_i32_b64 s5, s[6:7] +; GFX8-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0 +; GFX8-NEXT: s_bcnt1_i32_b64 s6, s[6:7] ; GFX8-NEXT: s_mov_b64 s[10:11], 0 
-; GFX8-NEXT: v_mov_b32_e32 v5, s4 -; GFX8-NEXT: s_mul_i32 s12, s5, 5 -; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v0, s14 -; GFX8-NEXT: v_mov_b32_e32 v1, s15 ; GFX8-NEXT: s_mov_b32 s7, 0xf000 +; GFX8-NEXT: s_mul_i32 s12, s6, 5 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: v_mov_b32_e32 v1, s5 ; GFX8-NEXT: s_mov_b32 s6, -1 ; GFX8-NEXT: s_mov_b32 s4, s2 ; GFX8-NEXT: s_mov_b32 s5, s3 ; GFX8-NEXT: .LBB9_2: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX8-NEXT: v_mov_b32_e32 v9, v1 -; GFX8-NEXT: v_mov_b32_e32 v8, v0 -; GFX8-NEXT: v_subrev_u32_e32 v6, vcc, s12, v8 -; GFX8-NEXT: v_subb_u32_e32 v7, vcc, v9, v5, vcc -; GFX8-NEXT: v_mov_b32_e32 v0, v6 -; GFX8-NEXT: v_mov_b32_e32 v1, v7 -; GFX8-NEXT: v_mov_b32_e32 v2, v8 -; GFX8-NEXT: v_mov_b32_e32 v3, v9 +; GFX8-NEXT: v_mov_b32_e32 v8, v1 +; GFX8-NEXT: v_mov_b32_e32 v7, v0 +; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s12, v7 +; GFX8-NEXT: v_subbrev_u32_e32 v6, vcc, 0, v8, vcc +; GFX8-NEXT: v_mov_b32_e32 v0, v5 +; GFX8-NEXT: v_mov_b32_e32 v1, v6 +; GFX8-NEXT: v_mov_b32_e32 v2, v7 +; GFX8-NEXT: v_mov_b32_e32 v3, v8 ; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1_vol -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8] ; GFX8-NEXT: s_or_b64 s[10:11], vcc, s[10:11] ; GFX8-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GFX8-NEXT: s_cbranch_execnz .LBB9_2 @@ -5670,39 +5666,37 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX9-NEXT: s_mov_b64 s[6:7], exec ; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0 ; GFX9-NEXT: v_mbcnt_hi_u32_b32 v4, s7, v0 -; GFX9-NEXT: s_mov_b32 s4, 0 ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX9-NEXT: s_and_saveexec_b64 s[8:9], vcc ; GFX9-NEXT: s_cbranch_execz .LBB9_4 ; GFX9-NEXT: ; %bb.1: ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: 
s_load_dwordx2 s[14:15], s[2:3], 0x0 -; GFX9-NEXT: s_bcnt1_i32_b64 s5, s[6:7] +; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0 +; GFX9-NEXT: s_bcnt1_i32_b64 s6, s[6:7] ; GFX9-NEXT: s_mov_b64 s[10:11], 0 -; GFX9-NEXT: v_mov_b32_e32 v5, s4 -; GFX9-NEXT: s_mul_i32 s12, s5, 5 -; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s14 -; GFX9-NEXT: v_mov_b32_e32 v1, s15 ; GFX9-NEXT: s_mov_b32 s7, 0xf000 +; GFX9-NEXT: s_mul_i32 s12, s6, 5 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: v_mov_b32_e32 v1, s5 ; GFX9-NEXT: s_mov_b32 s6, -1 ; GFX9-NEXT: s_mov_b32 s4, s2 ; GFX9-NEXT: s_mov_b32 s5, s3 ; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX9-NEXT: v_mov_b32_e32 v9, v1 -; GFX9-NEXT: v_mov_b32_e32 v8, v0 -; GFX9-NEXT: v_subrev_co_u32_e32 v6, vcc, s12, v8 -; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v9, v5, vcc -; GFX9-NEXT: v_mov_b32_e32 v0, v6 -; GFX9-NEXT: v_mov_b32_e32 v1, v7 -; GFX9-NEXT: v_mov_b32_e32 v2, v8 -; GFX9-NEXT: v_mov_b32_e32 v3, v9 +; GFX9-NEXT: v_mov_b32_e32 v8, v1 +; GFX9-NEXT: v_mov_b32_e32 v7, v0 +; GFX9-NEXT: v_subrev_co_u32_e32 v5, vcc, s12, v7 +; GFX9-NEXT: v_subbrev_co_u32_e32 v6, vcc, 0, v8, vcc +; GFX9-NEXT: v_mov_b32_e32 v0, v5 +; GFX9-NEXT: v_mov_b32_e32 v1, v6 +; GFX9-NEXT: v_mov_b32_e32 v2, v7 +; GFX9-NEXT: v_mov_b32_e32 v3, v8 ; GFX9-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol -; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8] ; GFX9-NEXT: s_or_b64 s[10:11], vcc, s[10:11] ; GFX9-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GFX9-NEXT: s_cbranch_execnz .LBB9_2 diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll index c3b14e8..ca50835 100644 --- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll +++ 
b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll @@ -57,8 +57,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB0_1: ; %atomicrmw.start @@ -69,7 +68,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX10-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -96,9 +95,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -106,7 +104,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ 
-123,9 +121,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -133,7 +130,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -150,9 +147,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX7-NEXT: v_mov_b32_e32 v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -160,7 +156,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX7-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -245,8 +241,7 @@ define void 
@buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB1_1: ; %atomicrmw.start @@ -256,7 +251,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX10-NEXT: v_mov_b32_e32 v5, v2 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: v_mov_b32_e32 v4, v1 -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -292,16 +287,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_add_f32_e32 v1, v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v4, v1 -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -318,16 +312,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v1, s20 ; GFX7-NEXT: buffer_load_dword v2, 
v1, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_add_f32_e32 v1, v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v4, v1 -; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -468,7 +461,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v9, 0x400, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 @@ -481,7 +473,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 -; GFX10-NEXT: ; implicit-def: $vgpr4 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB2_1 @@ -507,7 +498,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; 
GFX10-NEXT: s_cbranch_execnz .LBB2_4 @@ -556,7 +547,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v9, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -569,7 +559,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 ; GFX908-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB2_1 ; GFX908-NEXT: ; %bb.2: @@ -594,7 +583,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB2_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 @@ -614,7 +603,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -627,7 +615,6 @@ define float 
@buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 ; GFX8-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB2_1 ; GFX8-NEXT: ; %bb.2: @@ -652,7 +639,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB2_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 @@ -672,7 +659,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -684,7 +670,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 -; GFX7-NEXT: ; implicit-def: $vgpr4 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB2_1 ; GFX7-NEXT: ; %bb.2: @@ -709,7 +694,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap 
v[6:7], v9, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB2_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 @@ -830,8 +815,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start @@ -842,7 +826,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX10-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -860,16 +844,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_mov_b32_e32 v2, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v5, v0 ; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen 
offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -886,9 +869,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -896,7 +878,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -913,9 +895,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -923,7 +904,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; 
GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -940,9 +921,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX7-NEXT: v_mov_b32_e32 v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -950,7 +930,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX7-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1035,8 +1015,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB4_1: ; %atomicrmw.start @@ -1046,7 +1025,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX10-NEXT: v_mov_b32_e32 v5, v2 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: v_mov_b32_e32 v4, v1 -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ 
-1064,15 +1043,13 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v3, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v1, s6 ; GFX90A-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_add_f32_e32 v2, v3, v0 ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 @@ -1089,16 +1066,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: v_add_f32_e32 v1, v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v4, v1 -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -1115,16 +1091,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; 
GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_add_f32_e32 v1, v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v4, v1 -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -1141,16 +1116,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v1, s20 ; GFX7-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_add_f32_e32 v1, v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v4, v1 -; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -1223,9 +1197,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4 +; GFX11-NEXT: v_dual_mov_b32 
v2, v0 :: v_dual_mov_b32 v3, s16 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 @@ -1237,7 +1209,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -1255,8 +1227,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB5_1: ; %atomicrmw.start @@ -1267,7 +1238,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX10-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -1285,16 +1256,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX90A-NEXT: v_mov_b32_e32 v2, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v5, v0 ; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1311,9 +1281,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -1321,7 +1290,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX908-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1338,9 +1307,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start ; 
GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -1348,7 +1316,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX8-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1365,9 +1333,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX7-NEXT: v_mov_b32_e32 v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -1375,7 +1342,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX7-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1448,9 +1415,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4 +; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16 ; 
GFX11-NEXT: v_mov_b32_e32 v0, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 @@ -1462,7 +1427,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -1480,8 +1445,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start @@ -1492,7 +1456,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -1510,16 +1474,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX90A-NEXT: v_mov_b32_e32 v2, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 
v3, s20 ; GFX90A-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v5, v0 ; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1536,9 +1499,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -1546,7 +1508,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1563,9 +1525,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner 
Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -1573,7 +1534,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1590,9 +1551,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_mov_b32_e32 v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -1600,7 +1560,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1673,9 +1633,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote_memory__amdgpu_ignore_denormal_mode: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4 +; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16 ; 
GFX11-NEXT: v_mov_b32_e32 v0, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 @@ -1687,7 +1645,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -1705,8 +1663,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB7_1: ; %atomicrmw.start @@ -1717,7 +1674,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -1735,16 +1692,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX90A-NEXT: v_mov_b32_e32 v2, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 
v3, s20 ; GFX90A-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v5, v0 ; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1761,9 +1717,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -1771,7 +1726,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1788,9 +1743,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner 
Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -1798,7 +1752,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1815,9 +1769,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_mov_b32_e32 v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -1825,7 +1778,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1883,24 +1836,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 +; GFX12-NEXT: v_mov_b32_e32 v10, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: 
buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5] -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; GFX12-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1925,25 +1876,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 +; GFX11-NEXT: v_mov_b32_e32 v10, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop 
Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1958,26 +1907,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX10-NEXT: v_mov_b32_e32 v4, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 ; GFX10-NEXT: v_mov_b32_e32 v5, v1 -; GFX10-NEXT: s_add_i32 s4, s20, 0x800 -; GFX10-NEXT: v_mov_b32_e32 v6, s4 -; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX10-NEXT: v_mov_b32_e32 v10, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v10, v1 -; GFX10-NEXT: v_mov_b32_e32 v9, v0 +; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: 
v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX10-NEXT: v_mov_b32_e32 v0, v7 -; GFX10-NEXT: v_mov_b32_e32 v1, v8 -; GFX10-NEXT: v_mov_b32_e32 v2, v9 -; GFX10-NEXT: v_mov_b32_e32 v3, v10 -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX10-NEXT: v_mov_b32_e32 v0, v6 +; GFX10-NEXT: v_mov_b32_e32 v1, v7 +; GFX10-NEXT: v_mov_b32_e32 v2, v8 +; GFX10-NEXT: v_mov_b32_e32 v3, v9 +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX10-NEXT: v_mov_b32_e32 v9, v1 +; GFX10-NEXT: v_mov_b32_e32 v8, v0 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB8_1 @@ -1999,26 +1947,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX908-NEXT: v_mov_b32_e32 v5, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v10, s20 ; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; 
GFX908-NEXT: v_mov_b32_e32 v0, v6 +; GFX908-NEXT: v_mov_b32_e32 v1, v7 +; GFX908-NEXT: v_mov_b32_e32 v2, v8 +; GFX908-NEXT: v_mov_b32_e32 v3, v9 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX908-NEXT: v_mov_b32_e32 v9, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v8, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB8_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2030,26 +1977,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX8-NEXT: v_mov_b32_e32 v5, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v10, s20 ; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v0, v6 +; GFX8-NEXT: v_mov_b32_e32 v1, v7 +; GFX8-NEXT: v_mov_b32_e32 v2, v8 +; GFX8-NEXT: v_mov_b32_e32 v3, v9 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; 
GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX8-NEXT: v_mov_b32_e32 v9, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v8, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB8_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2061,26 +2007,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v4, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 +; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX7-NEXT: v_mov_b32_e32 v5, v1 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX7-NEXT: s_add_i32 s6, s20, 0x800 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v6, s6 +; GFX7-NEXT: v_mov_b32_e32 v10, s20 ; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v10, v1 -; GFX7-NEXT: v_mov_b32_e32 v9, v0 -; GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v8 -; GFX7-NEXT: v_mov_b32_e32 v2, v9 -; GFX7-NEXT: v_mov_b32_e32 v3, v10 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v0, v6 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 +; GFX7-NEXT: v_mov_b32_e32 v2, v8 +; GFX7-NEXT: v_mov_b32_e32 v3, v9 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7-NEXT: v_mov_b32_e32 v9, v1 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX7-NEXT: v_mov_b32_e32 v8, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB8_1 ; GFX7-NEXT: ; %bb.2: ; 
%atomicrmw.end @@ -2092,27 +2037,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v4, v0 ; GFX6-NEXT: v_mov_b32_e32 v0, s20 -; GFX6-NEXT: v_mov_b32_e32 v5, v1 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800 +; GFX6-NEXT: v_mov_b32_e32 v5, v1 ; GFX6-NEXT: s_mov_b64 s[4:5], 0 -; GFX6-NEXT: v_mov_b32_e32 v6, s6 +; GFX6-NEXT: v_mov_b32_e32 v10, s6 ; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v10, v1 -; GFX6-NEXT: v_mov_b32_e32 v9, v0 -; GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] +; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, v7 -; GFX6-NEXT: v_mov_b32_e32 v1, v8 -; GFX6-NEXT: v_mov_b32_e32 v2, v9 -; GFX6-NEXT: v_mov_b32_e32 v3, v10 -; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX6-NEXT: v_mov_b32_e32 v0, v6 +; GFX6-NEXT: v_mov_b32_e32 v1, v7 +; GFX6-NEXT: v_mov_b32_e32 v2, v8 +; GFX6-NEXT: v_mov_b32_e32 v3, v9 +; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX6-NEXT: v_mov_b32_e32 v9, v1 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_mov_b32_e32 v8, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB8_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2133,9 +2078,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_mov_b32_e32 v2, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; 
GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 +; GFX12-NEXT: v_mov_b32_e32 v6, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 ; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start @@ -2146,7 +2089,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[4:5] @@ -2174,9 +2117,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v2, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 +; GFX11-NEXT: v_mov_b32_e32 v6, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start @@ -2187,7 +2128,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -2205,8 +2146,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) 
lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x800 -; GFX10-NEXT: v_mov_b32_e32 v6, s4 +; GFX10-NEXT: v_mov_b32_e32 v6, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX10-NEXT: .LBB9_1: ; %atomicrmw.start @@ -2218,7 +2158,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: v_mov_b32_e32 v8, v3 ; GFX10-NEXT: v_mov_b32_e32 v7, v2 -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -2246,9 +2186,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v2, s20 ; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v6, s20 ; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -2257,7 +2196,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v9, v4 ; GFX908-NEXT: v_mov_b32_e32 v8, v3 ; GFX908-NEXT: v_mov_b32_e32 v7, v2 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[4:5] @@ -2275,9 +2214,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 
v2, s20 ; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v6, s20 ; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -2286,7 +2224,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v9, v4 ; GFX8-NEXT: v_mov_b32_e32 v8, v3 ; GFX8-NEXT: v_mov_b32_e32 v7, v2 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[4:5] @@ -2304,9 +2242,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 -; GFX7-NEXT: s_add_i32 s6, s20, 0x800 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v6, s6 +; GFX7-NEXT: v_mov_b32_e32 v6, s20 ; GFX7-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -2315,7 +2252,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX7-NEXT: v_mov_b32_e32 v9, v4 ; GFX7-NEXT: v_mov_b32_e32 v8, v3 ; GFX7-NEXT: v_mov_b32_e32 v7, v2 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[4:5] @@ -2373,10 +2310,9 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v8, v3 
:: v_dual_mov_b32 v7, v2 ; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 -; GFX12-NEXT: v_add_nc_u32_e32 v15, 0x800, v4 ; GFX12-NEXT: s_mov_b32 s1, exec_lo ; GFX12-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX12-NEXT: v_readfirstlane_b32 s4, v9 ; GFX12-NEXT: v_readfirstlane_b32 s5, v10 ; GFX12-NEXT: v_readfirstlane_b32 s6, v7 @@ -2390,7 +2326,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], null offen offset:2048 -; GFX12-NEXT: ; implicit-def: $vgpr4 ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB10_1 ; GFX12-NEXT: ; %bb.2: @@ -2420,7 +2355,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB10_4 ; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1 @@ -2474,22 +2409,21 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 ; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 -; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x800, v4 ; GFX11-NEXT: s_mov_b32 s1, 0 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | 
instid1(VALU_DEP_3) +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_readfirstlane_b32 s4, v9 ; GFX11-NEXT: v_readfirstlane_b32 s5, v10 ; GFX11-NEXT: v_readfirstlane_b32 s6, v7 ; GFX11-NEXT: v_readfirstlane_b32 s7, v8 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[9:10] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[7:8] ; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], 0 offen offset:2048 -; GFX11-NEXT: ; implicit-def: $vgpr4 ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB10_1 ; GFX11-NEXT: ; %bb.2: @@ -2518,7 +2452,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], 0 offen offset:2048 glc ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB10_4 ; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1 @@ -2543,7 +2477,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX10-NEXT: v_mov_b32_e32 v7, v2 ; GFX10-NEXT: v_mov_b32_e32 v10, v1 ; GFX10-NEXT: v_mov_b32_e32 v9, v0 -; GFX10-NEXT: v_add_nc_u32_e32 v15, 0x800, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1 @@ -2556,7 +2489,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: 
buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX10-NEXT: ; implicit-def: $vgpr4 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB10_1 @@ -2584,7 +2516,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB10_4 @@ -2640,7 +2572,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: v_mov_b32_e32 v7, v2 ; GFX908-NEXT: v_mov_b32_e32 v10, v1 ; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_add_u32_e32 v15, 0x800, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v9 @@ -2653,7 +2584,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 ; GFX908-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX908-NEXT: ; implicit-def: $vgpr4 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB10_1 ; GFX908-NEXT: ; %bb.2: @@ -2680,7 +2610,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB10_4 ; 
GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1 @@ -2704,7 +2634,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: v_mov_b32_e32 v7, v2 ; GFX8-NEXT: v_mov_b32_e32 v10, v1 ; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_add_u32_e32 v15, vcc, 0x800, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v9 @@ -2717,7 +2646,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 ; GFX8-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX8-NEXT: ; implicit-def: $vgpr4 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB10_1 ; GFX8-NEXT: ; %bb.2: @@ -2744,7 +2672,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB10_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1 @@ -2768,7 +2696,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX7-NEXT: v_mov_b32_e32 v7, v2 ; GFX7-NEXT: v_mov_b32_e32 v10, v1 ; GFX7-NEXT: v_mov_b32_e32 v9, v0 -; GFX7-NEXT: v_add_i32_e32 v15, vcc, 0x800, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v9 @@ -2780,7 +2707,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen 
offset:2048 -; GFX7-NEXT: ; implicit-def: $vgpr4 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB10_1 ; GFX7-NEXT: ; %bb.2: @@ -2807,7 +2733,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB10_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1 @@ -2903,24 +2829,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 +; GFX12-NEXT: v_mov_b32_e32 v10, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5] -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; GFX12-NEXT: 
v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2945,25 +2869,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 +; GFX11-NEXT: v_mov_b32_e32 v10, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: 
s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2978,26 +2900,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX10-NEXT: v_mov_b32_e32 v4, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 ; GFX10-NEXT: v_mov_b32_e32 v5, v1 -; GFX10-NEXT: s_add_i32 s4, s20, 0x800 -; GFX10-NEXT: v_mov_b32_e32 v6, s4 -; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX10-NEXT: v_mov_b32_e32 v10, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v10, v1 -; GFX10-NEXT: v_mov_b32_e32 v9, v0 +; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX10-NEXT: v_mov_b32_e32 v0, v7 -; GFX10-NEXT: v_mov_b32_e32 v1, v8 -; GFX10-NEXT: v_mov_b32_e32 v2, v9 -; GFX10-NEXT: v_mov_b32_e32 v3, v10 -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX10-NEXT: v_mov_b32_e32 v0, v6 +; GFX10-NEXT: v_mov_b32_e32 v1, v7 +; GFX10-NEXT: v_mov_b32_e32 v2, v8 +; GFX10-NEXT: v_mov_b32_e32 v3, v9 +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX10-NEXT: v_mov_b32_e32 v9, v1 +; GFX10-NEXT: v_mov_b32_e32 v8, v0 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; 
GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB11_1 @@ -3010,23 +2931,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v4, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 +; GFX90A-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX90A-NEXT: v_mov_b32_e32 v5, v1 -; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x800 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v6, s6 +; GFX90A-NEXT: v_mov_b32_e32 v10, s20 ; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5] -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1] -; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX90A-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[8:9], v[8:9] op_sel:[0,1] +; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 -; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[0:1], v[0:1] op_sel:[0,1] ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB11_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3038,26 +2958,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; 
GFX908-NEXT: v_mov_b32_e32 v0, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX908-NEXT: v_mov_b32_e32 v5, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v10, s20 ; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v0, v6 +; GFX908-NEXT: v_mov_b32_e32 v1, v7 +; GFX908-NEXT: v_mov_b32_e32 v2, v8 +; GFX908-NEXT: v_mov_b32_e32 v3, v9 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX908-NEXT: v_mov_b32_e32 v9, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v8, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB11_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3069,26 +2988,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX8-NEXT: v_mov_b32_e32 v5, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: s_add_i32 s6, 
s20, 0x800 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v10, s20 ; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v0, v6 +; GFX8-NEXT: v_mov_b32_e32 v1, v7 +; GFX8-NEXT: v_mov_b32_e32 v2, v8 +; GFX8-NEXT: v_mov_b32_e32 v3, v9 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX8-NEXT: v_mov_b32_e32 v9, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v8, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB11_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3100,26 +3018,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v4, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 +; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX7-NEXT: v_mov_b32_e32 v5, v1 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX7-NEXT: s_add_i32 s6, s20, 0x800 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v6, s6 +; GFX7-NEXT: v_mov_b32_e32 v10, s20 ; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v10, v1 -; GFX7-NEXT: v_mov_b32_e32 v9, v0 -; 
GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v8 -; GFX7-NEXT: v_mov_b32_e32 v2, v9 -; GFX7-NEXT: v_mov_b32_e32 v3, v10 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v0, v6 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 +; GFX7-NEXT: v_mov_b32_e32 v2, v8 +; GFX7-NEXT: v_mov_b32_e32 v3, v9 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7-NEXT: v_mov_b32_e32 v9, v1 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX7-NEXT: v_mov_b32_e32 v8, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB11_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3131,27 +3048,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v4, v0 ; GFX6-NEXT: v_mov_b32_e32 v0, s20 -; GFX6-NEXT: v_mov_b32_e32 v5, v1 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800 +; GFX6-NEXT: v_mov_b32_e32 v5, v1 ; GFX6-NEXT: s_mov_b64 s[4:5], 0 -; GFX6-NEXT: v_mov_b32_e32 v6, s6 +; GFX6-NEXT: v_mov_b32_e32 v10, s6 ; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v10, v1 -; GFX6-NEXT: v_mov_b32_e32 v9, v0 -; GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] +; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, v7 -; GFX6-NEXT: v_mov_b32_e32 v1, v8 -; GFX6-NEXT: v_mov_b32_e32 v2, v9 -; GFX6-NEXT: v_mov_b32_e32 v3, v10 -; GFX6-NEXT: 
buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX6-NEXT: v_mov_b32_e32 v0, v6 +; GFX6-NEXT: v_mov_b32_e32 v1, v7 +; GFX6-NEXT: v_mov_b32_e32 v2, v8 +; GFX6-NEXT: v_mov_b32_e32 v3, v9 +; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX6-NEXT: v_mov_b32_e32 v9, v1 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_mov_b32_e32 v8, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB11_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3173,24 +3090,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 +; GFX12-NEXT: v_mov_b32_e32 v10, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB12_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5] -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; GFX12-NEXT: 
v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -3215,25 +3130,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 +; GFX11-NEXT: v_mov_b32_e32 v10, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB12_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: 
s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -3248,26 +3161,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX10-NEXT: v_mov_b32_e32 v4, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 ; GFX10-NEXT: v_mov_b32_e32 v5, v1 -; GFX10-NEXT: s_add_i32 s4, s20, 0x800 -; GFX10-NEXT: v_mov_b32_e32 v6, s4 -; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX10-NEXT: v_mov_b32_e32 v10, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX10-NEXT: .LBB12_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v10, v1 -; GFX10-NEXT: v_mov_b32_e32 v9, v0 +; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX10-NEXT: v_mov_b32_e32 v0, v7 -; GFX10-NEXT: v_mov_b32_e32 v1, v8 -; GFX10-NEXT: v_mov_b32_e32 v2, v9 -; GFX10-NEXT: v_mov_b32_e32 v3, v10 -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX10-NEXT: v_mov_b32_e32 v0, v6 +; GFX10-NEXT: v_mov_b32_e32 v1, v7 +; GFX10-NEXT: v_mov_b32_e32 v2, v8 +; GFX10-NEXT: v_mov_b32_e32 v3, v9 +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX10-NEXT: v_mov_b32_e32 v9, v1 +; GFX10-NEXT: v_mov_b32_e32 v8, v0 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; 
GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB12_1 @@ -3289,26 +3201,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX908-NEXT: v_mov_b32_e32 v5, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v10, s20 ; GFX908-NEXT: .LBB12_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v0, v6 +; GFX908-NEXT: v_mov_b32_e32 v1, v7 +; GFX908-NEXT: v_mov_b32_e32 v2, v8 +; GFX908-NEXT: v_mov_b32_e32 v3, v9 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX908-NEXT: v_mov_b32_e32 v9, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v8, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB12_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3320,26 +3231,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) 
; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX8-NEXT: v_mov_b32_e32 v5, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v10, s20 ; GFX8-NEXT: .LBB12_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v0, v6 +; GFX8-NEXT: v_mov_b32_e32 v1, v7 +; GFX8-NEXT: v_mov_b32_e32 v2, v8 +; GFX8-NEXT: v_mov_b32_e32 v3, v9 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX8-NEXT: v_mov_b32_e32 v9, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v8, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB12_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3351,26 +3261,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v4, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 +; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX7-NEXT: v_mov_b32_e32 v5, v1 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX7-NEXT: s_add_i32 s6, s20, 0x800 ; GFX7-NEXT: s_mov_b64 
s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v6, s6 +; GFX7-NEXT: v_mov_b32_e32 v10, s20 ; GFX7-NEXT: .LBB12_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v10, v1 -; GFX7-NEXT: v_mov_b32_e32 v9, v0 -; GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v8 -; GFX7-NEXT: v_mov_b32_e32 v2, v9 -; GFX7-NEXT: v_mov_b32_e32 v3, v10 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v0, v6 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 +; GFX7-NEXT: v_mov_b32_e32 v2, v8 +; GFX7-NEXT: v_mov_b32_e32 v3, v9 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7-NEXT: v_mov_b32_e32 v9, v1 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX7-NEXT: v_mov_b32_e32 v8, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB12_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3382,27 +3291,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v4, v0 ; GFX6-NEXT: v_mov_b32_e32 v0, s20 -; GFX6-NEXT: v_mov_b32_e32 v5, v1 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800 +; GFX6-NEXT: v_mov_b32_e32 v5, v1 ; GFX6-NEXT: s_mov_b64 s[4:5], 0 -; GFX6-NEXT: v_mov_b32_e32 v6, s6 +; GFX6-NEXT: v_mov_b32_e32 v10, s6 ; GFX6-NEXT: .LBB12_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v10, v1 -; GFX6-NEXT: v_mov_b32_e32 v9, v0 -; 
GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] +; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, v7 -; GFX6-NEXT: v_mov_b32_e32 v1, v8 -; GFX6-NEXT: v_mov_b32_e32 v2, v9 -; GFX6-NEXT: v_mov_b32_e32 v3, v10 -; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX6-NEXT: v_mov_b32_e32 v0, v6 +; GFX6-NEXT: v_mov_b32_e32 v1, v7 +; GFX6-NEXT: v_mov_b32_e32 v2, v8 +; GFX6-NEXT: v_mov_b32_e32 v3, v9 +; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX6-NEXT: v_mov_b32_e32 v9, v1 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_mov_b32_e32 v8, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB12_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7028,9 +6937,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4 +; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 @@ -7042,7 +6949,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen 
offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -7060,8 +6967,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start @@ -7072,7 +6978,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -7099,9 +7005,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -7109,7 +7014,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: 
buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -7126,9 +7031,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -7138,7 +7042,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_or_b32_e32 v4, v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -7156,7 +7060,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -7164,7 +7067,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -7181,7 +7084,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_or_b32_e32 v5, v7, v0 ; GFX7-NEXT: 
v_mov_b32_e32 v8, v6 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7 @@ -7277,9 +7180,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v1, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v3, s4 +; GFX11-NEXT: v_mov_b32_e32 v3, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: .LBB20_1: ; %atomicrmw.start @@ -7290,7 +7191,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_mov_b32_e32 v4, v1 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -7308,8 +7209,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB20_1: ; %atomicrmw.start @@ -7319,7 +7219,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX10-NEXT: v_mov_b32_e32 v5, v2 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: v_mov_b32_e32 v4, v1 -; GFX10-NEXT: 
buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -7355,9 +7255,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -7366,7 +7265,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX8-NEXT: v_or_b32_e32 v1, v4, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v4, v1 -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -7385,7 +7284,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -7393,7 +7291,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2 ; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: 
v_cvt_f16_f32_e32 v4, v4 @@ -7410,7 +7308,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: v_or_b32_e32 v4, v6, v3 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6 @@ -7543,7 +7441,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x400, v4 ; GFX11-NEXT: s_mov_b32 s1, 0 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -7558,7 +7455,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: buffer_load_b32 v8, v4, s[4:7], 0 offen offset:1024 -; GFX11-NEXT: ; implicit-def: $vgpr4 ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB21_1 ; GFX11-NEXT: ; %bb.2: @@ -7587,7 +7483,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[6:7], v9, s[4:7], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[6:7], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB21_4 ; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 @@ -7609,7 +7505,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX10-LABEL: 
buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v9, 0x400, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -7622,7 +7517,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 -; GFX10-NEXT: ; implicit-def: $vgpr4 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB21_1 @@ -7648,7 +7542,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB21_4 @@ -7697,7 +7591,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v9, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -7710,7 +7603,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 ; GFX908-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 ; 
GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB21_1 ; GFX908-NEXT: ; %bb.2: @@ -7735,7 +7627,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB21_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 @@ -7755,7 +7647,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -7768,7 +7659,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 ; GFX8-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB21_1 ; GFX8-NEXT: ; %bb.2: @@ -7778,9 +7668,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_add_f16_sdwa v4, v8, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_add_f16_e32 v6, v8, v5 -; GFX8-NEXT: v_or_b32_e32 v7, v6, v4 +; GFX8-NEXT: v_add_f16_sdwa v6, v8, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; 
GFX8-NEXT: v_add_f16_e32 v7, v8, v5 +; GFX8-NEXT: v_or_b32_e32 v7, v7, v6 ; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: s_mov_b64 s[12:13], exec ; GFX8-NEXT: v_mov_b32_e32 v7, v8 @@ -7795,7 +7685,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB21_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 @@ -7815,7 +7705,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -7826,39 +7715,38 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3] ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] -; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX7-NEXT: ; implicit-def: $vgpr4 +; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB21_1 ; GFX7-NEXT: ; %bb.2: ; GFX7-NEXT: s_mov_b64 exec, s[6:7] ; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6 -; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v5 +; GFX7-NEXT: v_cvt_f16_f32_e32 v9, v5 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8 +; 
GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8 ; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5 ; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v6 -; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v9 ; GFX7-NEXT: s_mov_b64 s[6:7], 0 ; GFX7-NEXT: .LBB21_3: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Loop Header: Depth=1 ; GFX7-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5 -; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 +; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v7 ; GFX7-NEXT: s_mov_b64 s[12:13], exec -; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5 -; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v4 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v5 +; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v6 ; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX7-NEXT: v_add_f32_e32 v6, v6, v10 -; GFX7-NEXT: v_add_f32_e32 v7, v7, v11 -; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v6 +; GFX7-NEXT: v_or_b32_e32 v6, v6, v5 +; GFX7-NEXT: v_add_f32_e32 v7, v7, v10 +; GFX7-NEXT: v_add_f32_e32 v8, v8, v11 ; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7 -; GFX7-NEXT: v_or_b32_e32 v6, v4, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; GFX7-NEXT: v_or_b32_e32 v5, v7, v4 -; GFX7-NEXT: v_mov_b32_e32 v8, v6 -; GFX7-NEXT: v_mov_b32_e32 v7, v5 +; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX7-NEXT: v_or_b32_e32 v5, v8, v5 +; GFX7-NEXT: v_mov_b32_e32 v9, v6 +; GFX7-NEXT: v_mov_b32_e32 v8, v5 ; GFX7-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX7-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -7870,23 +7758,23 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v9, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB21_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: 
Header=BB21_3 Depth=1 ; GFX7-NEXT: s_mov_b64 exec, s[12:13] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8 ; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v6 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6 ; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7] ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX7-NEXT: s_cbranch_execnz .LBB21_3 ; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v4 +; GFX7-NEXT: v_mov_b32_e32 v0, v7 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -8003,9 +7891,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4 +; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 @@ -8017,7 +7903,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -8035,8 +7921,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX10-NEXT: 
s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start @@ -8047,7 +7932,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX10-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -8065,16 +7950,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX90A-NEXT: v_mov_b32_e32 v2, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v5, v0 ; GFX90A-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -8091,9 +7975,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen 
offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -8101,7 +7984,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX908-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -8118,9 +8001,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -8130,7 +8012,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX8-NEXT: v_or_b32_e32 v4, v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -8148,7 +8030,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0 -; 
GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -8156,7 +8037,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -8173,7 +8054,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX7-NEXT: v_or_b32_e32 v5, v7, v0 ; GFX7-NEXT: v_mov_b32_e32 v8, v6 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7 @@ -8269,9 +8150,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v1, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v3, s4 +; GFX11-NEXT: v_mov_b32_e32 v3, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start @@ -8282,7 +8161,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_mov_b32_e32 v4, v1 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: 
buffer_gl0_inv @@ -8300,8 +8179,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start @@ -8311,7 +8189,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX10-NEXT: v_mov_b32_e32 v5, v2 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: v_mov_b32_e32 v4, v1 -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -8329,15 +8207,13 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v3, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v1, s6 ; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_pk_add_f16 v2, v3, v0 ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 @@ -8354,16 +8230,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: 
v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: v_pk_add_f16 v1, v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v4, v1 -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -8380,9 +8255,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -8391,7 +8265,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX8-NEXT: v_or_b32_e32 v1, v4, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v4, v1 -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -8410,7 +8284,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0 -; GFX7-NEXT: 
s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -8418,7 +8291,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2 ; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 @@ -8435,7 +8308,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX7-NEXT: v_or_b32_e32 v4, v6, v3 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6 @@ -8530,9 +8403,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no_remote_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4 +; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 @@ -8544,7 +8415,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: 
buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -8562,8 +8433,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB24_1: ; %atomicrmw.start @@ -8574,7 +8444,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -8592,16 +8462,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX90A-NEXT: v_mov_b32_e32 v2, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v5, v0 ; GFX90A-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; 
GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -8618,9 +8487,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -8628,7 +8496,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -8645,9 +8513,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -8657,7 +8524,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_or_b32_e32 v4, v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: 
v_cmp_eq_u32_e32 vcc, v0, v5 @@ -8675,7 +8542,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -8683,7 +8549,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -8700,7 +8566,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_or_b32_e32 v5, v7, v0 ; GFX7-NEXT: v_mov_b32_e32 v8, v6 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7 @@ -8796,9 +8662,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v1, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v3, s4 +; GFX11-NEXT: v_mov_b32_e32 v3, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: .LBB25_1: ; %atomicrmw.start @@ -8809,7 +8673,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) 
; GFX11-NEXT: v_mov_b32_e32 v4, v1 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -8827,8 +8691,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB25_1: ; %atomicrmw.start @@ -8838,7 +8701,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX10-NEXT: v_mov_b32_e32 v5, v2 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: v_mov_b32_e32 v4, v1 -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -8856,15 +8719,13 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v3, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v1, s6 ; GFX90A-NEXT: .LBB25_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_pk_add_f16 v2, v3, v0 ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: 
s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 @@ -8881,16 +8742,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB25_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: v_pk_add_f16 v1, v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v4, v1 -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -8907,9 +8767,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB25_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -8918,7 +8777,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX8-NEXT: v_or_b32_e32 v1, v4, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v4, v1 -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 
v4, v2 @@ -8937,7 +8796,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -8945,7 +8803,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2 ; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB25_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 @@ -8962,7 +8820,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX7-NEXT: v_or_b32_e32 v4, v6, v3 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6 @@ -9054,13 +8912,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -9082,7 +8939,7 @@ define 
<2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5] ; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7] -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -9097,12 +8954,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1 +; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1 ; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start @@ -9131,7 +8987,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ 
-9149,10 +9005,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1 ; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 @@ -9183,7 +9038,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -9202,9 +9057,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 @@ -9230,7 +9084,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX10-NEXT: 
v_mov_b32_e32 v0, v5 ; GFX10-NEXT: v_mov_b32_e32 v1, v6 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -9248,13 +9102,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -9275,7 +9128,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -9292,13 +9145,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX908-NEXT: s_mov_b32 
s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -9320,7 +9172,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: v_mov_b32_e32 v1, v6 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -9337,11 +9189,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -9366,7 +9217,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: v_mov_b32_e32 v1, v6 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -9382,7 +9233,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v4, v2, 
s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -9391,7 +9241,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 @@ -9406,7 +9256,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: v_alignbit_b32 v0, v0, v5, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v1 ; GFX7-NEXT: v_mov_b32_e32 v5, v0 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -9488,13 +9338,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -9515,7 +9364,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] ; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX942-NEXT: 
v_mov_b64_e32 v[6:7], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -9531,11 +9380,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start @@ -9561,7 +9408,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -9580,11 +9427,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start @@ -9610,7 +9455,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -9629,12 +9474,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 -; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -9656,7 +9500,7 @@ define void 
@buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v6, v1 ; GFX10-NEXT: v_mov_b32_e32 v5, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -9674,13 +9518,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -9700,7 +9543,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -9717,13 +9560,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: 
s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -9744,7 +9586,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v6, v1 ; GFX908-NEXT: v_mov_b32_e32 v5, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -9761,11 +9603,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -9789,7 +9630,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -9806,7 +9647,6 @@ define void 
@buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -9815,7 +9655,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 ; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 @@ -9830,7 +9670,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4 @@ -9930,7 +9770,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX942-NEXT: s_mov_b64 s[2:3], exec ; GFX942-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -9942,40 +9781,39 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3] ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] -; 
GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024 -; GFX942-NEXT: ; implicit-def: $vgpr4 +; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB28_1 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: s_mov_b64 exec, s[2:3] ; GFX942-NEXT: s_mov_b64 s[2:3], 0 -; GFX942-NEXT: v_lshlrev_b32_e32 v9, 16, v5 +; GFX942-NEXT: v_lshlrev_b32_e32 v10, 16, v5 ; GFX942-NEXT: s_movk_i32 s10, 0x7fff -; GFX942-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX942-NEXT: s_mov_b32 s11, 0x7060302 ; GFX942-NEXT: .LBB28_3: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Loop Header: Depth=1 ; GFX942-NEXT: ; Child Loop BB28_4 Depth 2 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v7 -; GFX942-NEXT: v_add_f32_e32 v4, v4, v9 -; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX942-NEXT: v_add3_u32 v5, v5, v4, s10 -; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX942-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX942-NEXT: v_add_f32_e32 v6, v6, v10 +; GFX942-NEXT: v_bfe_u32 v7, v6, 16, 1 +; GFX942-NEXT: v_add3_u32 v7, v7, v6, s10 +; GFX942-NEXT: v_or_b32_e32 v8, 0x400000, v6 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 ; GFX942-NEXT: s_mov_b64 s[8:9], exec ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v7 -; GFX942-NEXT: v_add_f32_e32 v5, v5, v10 -; GFX942-NEXT: v_bfe_u32 v6, v5, 16, 1 -; GFX942-NEXT: v_add3_u32 v6, v6, v5, s10 -; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX942-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc +; GFX942-NEXT: v_and_b32_e32 v7, 0xffff0000, v9 +; GFX942-NEXT: v_add_f32_e32 v7, v7, v5 +; GFX942-NEXT: v_bfe_u32 v8, v7, 16, 1 +; GFX942-NEXT: v_add3_u32 v8, v8, v7, s10 +; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX942-NEXT: v_cmp_u_f32_e32 
vcc, v7, v7 ; GFX942-NEXT: s_nop 1 -; GFX942-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc -; GFX942-NEXT: v_perm_b32 v6, v5, v4, s11 -; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7] +; GFX942-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc +; GFX942-NEXT: v_perm_b32 v8, v7, v6, s11 +; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9] ; GFX942-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX942-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -9988,27 +9826,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB28_4 ; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX942-NEXT: s_mov_b64 exec, s[8:9] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v7, v4 +; GFX942-NEXT: v_mov_b32_e32 v9, v6 ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB28_3 ; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX942-NEXT: s_or_b64 exec, exec, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v0, v4 +; GFX942-NEXT: v_mov_b32_e32 v0, v6 ; GFX942-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0 ; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-TRUE16-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 @@ -10022,8 
+9859,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4 +; GFX11-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_1 ; GFX11-TRUE16-NEXT: ; %bb.2: @@ -10036,28 +9872,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: ; Child Loop BB28_4 Depth 2 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v8 :: v_dual_add_f32 v4, v4, v9 -; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, v6, v8 :: v_dual_add_f32 v5, v5, v9 +; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX11-TRUE16-NEXT: 
v_bfe_u32 v10, v5, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo +; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h ; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-TRUE16-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -10071,14 +9907,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_4 ; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; 
GFX11-TRUE16-NEXT: buffer_gl0_inv ; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -10088,13 +9924,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0 ; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-FAKE16-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 @@ -10108,8 +9943,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4 +; GFX11-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_1 ; GFX11-FAKE16-NEXT: ; %bb.2: @@ -10122,28 +9956,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: ; Child Loop BB28_4 Depth 2 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 ; 
GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v9 :: v_dual_add_f32 v4, v4, v8 -; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11-FAKE16-NEXT: v_dual_add_f32 v6, v6, v9 :: v_dual_add_f32 v5, v5, v8 +; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo +; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-FAKE16-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, 
v0 @@ -10157,14 +9991,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_4 ; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv ; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -10174,13 +10008,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2 ; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 @@ -10192,8 +10025,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3] ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 -; GFX10-NEXT: buffer_load_dword v6, v4, 
s[8:11], 0 offen offset:1024 -; GFX10-NEXT: ; implicit-def: $vgpr4 +; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB28_1 @@ -10205,25 +10037,25 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; Child Loop BB28_4 Depth 2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_add_f32_e32 v4, v4, v8 -; GFX10-NEXT: v_add_f32_e32 v5, v5, v9 -; GFX10-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX10-NEXT: v_bfe_u32 v11, v5, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX10-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX10-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX10-NEXT: v_add_f32_e32 v5, v5, v8 +; GFX10-NEXT: v_add_f32_e32 v6, v6, v9 +; GFX10-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX10-NEXT: v_bfe_u32 v11, v6, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX10-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo -; GFX10-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 -; GFX10-NEXT: v_mov_b32_e32 v4, v5 +; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX10-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX10-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX10-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo +; GFX10-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v5, v6 +; GFX10-NEXT: v_mov_b32_e32 v6, v7 ; 
GFX10-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX10-NEXT: v_readfirstlane_b32 s8, v0 @@ -10235,15 +10067,15 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB28_4 ; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX10-NEXT: s_mov_b32 exec_lo, s6 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX10-NEXT: v_mov_b32_e32 v7, v5 ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 @@ -10252,13 +10084,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX10-NEXT: s_cbranch_execnz .LBB28_3 ; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: v_mov_b32_e32 v0, v4 +; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX90A-NEXT: s_mov_b64 s[6:7], exec ; GFX90A-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -10270,38 +10101,37 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: buffer_load_dword v7, v4, 
s[8:11], 0 offen offset:1024 -; GFX90A-NEXT: ; implicit-def: $vgpr4 +; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024 ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB28_1 ; GFX90A-NEXT: ; %bb.2: ; GFX90A-NEXT: s_mov_b64 exec, s[6:7] ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 -; GFX90A-NEXT: v_lshlrev_b32_e32 v9, 16, v5 +; GFX90A-NEXT: v_lshlrev_b32_e32 v10, 16, v5 ; GFX90A-NEXT: s_movk_i32 s14, 0x7fff -; GFX90A-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX90A-NEXT: s_mov_b32 s15, 0x7060302 ; GFX90A-NEXT: .LBB28_3: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB28_4 Depth 2 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v7 -; GFX90A-NEXT: v_add_f32_e32 v4, v4, v9 -; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s14 -; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v7 -; GFX90A-NEXT: v_add_f32_e32 v5, v5, v10 -; GFX90A-NEXT: v_bfe_u32 v6, v5, 16, 1 -; GFX90A-NEXT: v_add3_u32 v6, v6, v5, s14 -; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX90A-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc -; GFX90A-NEXT: v_perm_b32 v6, v5, v4, s15 +; GFX90A-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX90A-NEXT: v_add_f32_e32 v6, v6, v10 +; GFX90A-NEXT: v_bfe_u32 v7, v6, 16, 1 +; GFX90A-NEXT: v_add3_u32 v7, v7, v6, s14 +; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v6 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX90A-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc +; GFX90A-NEXT: v_and_b32_e32 v7, 0xffff0000, v9 +; GFX90A-NEXT: v_add_f32_e32 v7, v7, v5 +; GFX90A-NEXT: v_bfe_u32 v8, v7, 16, 1 +; GFX90A-NEXT: v_add3_u32 v8, v8, v7, s14 +; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; 
GFX90A-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc +; GFX90A-NEXT: v_perm_b32 v8, v7, v6, s15 ; GFX90A-NEXT: s_mov_b64 s[12:13], exec -; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1] ; GFX90A-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -10313,27 +10143,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB28_4 ; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX90A-NEXT: s_mov_b64 exec, s[12:13] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v7, v4 +; GFX90A-NEXT: v_mov_b32_e32 v9, v6 ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB28_3 ; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v0, v4 +; GFX90A-NEXT: v_mov_b32_e32 v0, v6 ; GFX90A-NEXT: s_setpc_b64 s[30:31] ; ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -10345,8 +10174,7 @@ define <2 x bfloat> 
@buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 -; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 +; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB28_1 ; GFX908-NEXT: ; %bb.2: @@ -10360,24 +10188,24 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX908-NEXT: ; =>This Loop Header: Depth=1 ; GFX908-NEXT: ; Child Loop BB28_4 Depth 2 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX908-NEXT: v_add_f32_e32 v4, v4, v8 -; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX908-NEXT: v_add3_u32 v5, v5, v4, s14 -; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v4 -; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc -; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX908-NEXT: v_add_f32_e32 v5, v5, v9 -; GFX908-NEXT: v_bfe_u32 v10, v5, 16, 1 -; GFX908-NEXT: v_add3_u32 v10, v10, v5, s14 -; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v5 +; GFX908-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX908-NEXT: v_add_f32_e32 v5, v5, v8 +; GFX908-NEXT: v_bfe_u32 v6, v5, 16, 1 +; GFX908-NEXT: v_add3_u32 v6, v6, v5, s14 +; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v5 ; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX908-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc -; GFX908-NEXT: v_perm_b32 v5, v5, v4, s15 -; GFX908-NEXT: v_mov_b32_e32 v4, v5 -; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc +; GFX908-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX908-NEXT: v_add_f32_e32 v6, v6, v9 +; GFX908-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX908-NEXT: v_add3_u32 v10, v10, v6, s14 +; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX908-NEXT: 
v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX908-NEXT: v_perm_b32 v6, v6, v5, s15 ; GFX908-NEXT: v_mov_b32_e32 v5, v6 +; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_mov_b32_e32 v6, v7 ; GFX908-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -10389,27 +10217,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB28_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX908-NEXT: s_mov_b64 exec, s[12:13] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v6, v4 +; GFX908-NEXT: v_mov_b32_e32 v7, v5 ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB28_3 ; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v0, v4 +; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -10421,8 +10248,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: 
s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 -; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 +; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB28_1 ; GFX8-NEXT: ; %bb.2: @@ -10434,27 +10260,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB28_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX8-NEXT: v_add_f32_e32 v4, v4, v8 -; GFX8-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v4 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5 -; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc -; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX8-NEXT: v_add_f32_e32 v5, v5, v9 -; GFX8-NEXT: v_bfe_u32 v10, v5, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v5 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10 -; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX8-NEXT: v_add_f32_e32 v5, v5, v8 +; GFX8-NEXT: v_bfe_u32 v6, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v5 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; GFX8-NEXT: v_alignbit_b32 v5, v5, v4, 16 -; GFX8-NEXT: v_mov_b32_e32 v4, v5 -; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX8-NEXT: v_add_f32_e32 v6, v6, v9 +; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; 
GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX8-NEXT: v_alignbit_b32 v6, v6, v5, 16 ; GFX8-NEXT: v_mov_b32_e32 v5, v6 +; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -10466,27 +10292,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB28_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX8-NEXT: s_mov_b64 exec, s[12:13] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v7, v5 ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB28_3 ; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v0, v4 +; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v8, vcc, 0x400, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -10497,36 +10322,35 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX7-NEXT: 
v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3] ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] -; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX7-NEXT: ; implicit-def: $vgpr4 +; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB28_1 ; GFX7-NEXT: ; %bb.2: ; GFX7-NEXT: s_mov_b64 exec, s[6:7] ; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 -; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v5 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 ; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v6 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; GFX7-NEXT: s_mov_b64 s[6:7], 0 -; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v5 ; GFX7-NEXT: .LBB28_3: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Loop Header: Depth=1 ; GFX7-NEXT: ; Child Loop BB28_4 Depth 2 -; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v7 -; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v4 -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7 -; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 -; GFX7-NEXT: v_add_f32_e32 v4, v4, v10 -; GFX7-NEXT: v_add_f32_e32 v6, v6, v9 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_alignbit_b32 v4, v4, v6, 16 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v7 -; GFX7-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; GFX7-NEXT: v_mov_b32_e32 v7, v5 +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v8 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v7 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 +; GFX7-NEXT: v_add_f32_e32 v8, v8, v11 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; GFX7-NEXT: v_add_f32_e32 v5, v5, v10 +; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v8 +; GFX7-NEXT: v_alignbit_b32 v6, v7, v6, 16 +; GFX7-NEXT: v_alignbit_b32 v5, v8, v5, 16 +; GFX7-NEXT: 
v_mov_b32_e32 v9, v6 ; GFX7-NEXT: s_mov_b64 s[12:13], exec -; GFX7-NEXT: v_mov_b32_e32 v6, v4 +; GFX7-NEXT: v_mov_b32_e32 v8, v5 ; GFX7-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX7-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -10538,23 +10362,23 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v8, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB28_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX7-NEXT: s_mov_b64 exec, s[12:13] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v5 -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v6 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8 ; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX7-NEXT: s_cbranch_execnz .LBB28_3 ; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v4 +; GFX7-NEXT: v_mov_b32_e32 v0, v8 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; ; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: @@ -10658,13 +10482,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 
v2, 16, v1 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB29_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -10686,7 +10509,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5] ; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7] -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -10701,12 +10524,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1 +; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1 ; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start @@ -10735,7 +10557,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: 
v_mov_b16_e32 v5.l, v0.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -10753,10 +10575,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1 ; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 @@ -10787,7 +10608,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -10806,9 +10627,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: 
s_mov_b32 s5, 0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 @@ -10834,7 +10654,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: v_mov_b32_e32 v1, v6 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -10852,13 +10672,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB29_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -10879,7 +10698,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -10896,13 +10715,12 @@ define <2 x bfloat> 
@buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB29_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -10924,7 +10742,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: v_mov_b32_e32 v1, v6 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -10941,11 +10759,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB29_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -10970,7 +10787,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: v_mov_b32_e32 v1, v6 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, 
s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -10986,7 +10803,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -10995,7 +10811,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB29_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 @@ -11010,7 +10826,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX7-NEXT: v_alignbit_b32 v0, v0, v5, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v1 ; GFX7-NEXT: v_mov_b32_e32 v5, v0 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -11092,13 +10908,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; 
GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB30_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -11119,7 +10934,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] ; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -11135,11 +10950,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start @@ -11165,7 +10978,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; 
GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -11184,11 +10997,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start @@ -11214,7 +11025,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -11233,12 +11044,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX10-NEXT: v_and_b32_e32 v3, 
0xffff0000, v0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 -; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB30_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -11260,7 +11070,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v6, v1 ; GFX10-NEXT: v_mov_b32_e32 v5, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -11278,13 +11088,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB30_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -11304,7 +11113,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc ; 
GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -11321,13 +11130,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB30_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -11348,7 +11156,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v6, v1 ; GFX908-NEXT: v_mov_b32_e32 v5, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -11365,11 +11173,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB30_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -11393,7 +11200,7 @@ define void 
@buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -11410,7 +11217,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -11419,7 +11225,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 ; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB30_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 @@ -11434,7 +11240,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4 @@ -11517,13 +11323,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; 
GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB31_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -11545,7 +11350,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5] ; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7] -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -11560,12 +11365,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1 +; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1 ; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start @@ -11594,7 +11398,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; 
GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -11612,10 +11416,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1 ; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 @@ -11646,7 +11449,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -11665,9 +11468,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: 
v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 @@ -11693,7 +11495,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: v_mov_b32_e32 v1, v6 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -11711,13 +11513,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB31_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -11738,7 +11539,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: 
v_cmp_eq_u32_e32 vcc, v0, v7 @@ -11755,13 +11556,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB31_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -11783,7 +11583,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: v_mov_b32_e32 v1, v6 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -11800,11 +11600,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB31_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -11829,7 +11628,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; 
GFX8-NEXT: v_mov_b32_e32 v1, v6 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -11845,7 +11644,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -11854,7 +11652,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB31_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 @@ -11869,7 +11667,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: v_alignbit_b32 v0, v0, v5, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v1 ; GFX7-NEXT: v_mov_b32_e32 v5, v0 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -11951,13 +11749,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX942-NEXT: 
s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB32_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -11978,7 +11775,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] ; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -11994,11 +11791,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start @@ -12024,7 +11819,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-TRUE16-NEXT: 
buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -12043,11 +11838,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start @@ -12073,7 +11866,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -12092,12 +11885,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: 
v_lshlrev_b32_e32 v2, 16, v0 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 -; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB32_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -12119,7 +11911,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v6, v1 ; GFX10-NEXT: v_mov_b32_e32 v5, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -12137,13 +11929,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB32_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -12163,7 +11954,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: 
buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -12180,13 +11971,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB32_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -12207,7 +11997,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v6, v1 ; GFX908-NEXT: v_mov_b32_e32 v5, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -12224,11 +12014,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB32_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ 
-12252,7 +12041,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -12269,7 +12058,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -12278,7 +12066,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 ; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB32_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 @@ -12293,7 +12081,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4 @@ -12375,13 +12163,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: 
buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB33_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -12402,7 +12189,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] ; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -12418,11 +12205,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start @@ -12448,7 +12233,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16-NEXT: s_delay_alu 
instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -12467,11 +12252,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start @@ -12497,7 +12280,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -12516,12 +12299,11 @@ define void 
@buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 -; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB33_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -12543,7 +12325,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v6, v1 ; GFX10-NEXT: v_mov_b32_e32 v5, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -12561,13 +12343,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB33_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -12587,7 +12368,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: 
v_cndmask_b32_e32 v5, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -12604,13 +12385,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB33_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -12631,7 +12411,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v6, v1 ; GFX908-NEXT: v_mov_b32_e32 v5, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -12648,11 +12428,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX8-NEXT: 
v_and_b32_e32 v3, 0xffff0000, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB33_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -12676,7 +12455,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -12693,7 +12472,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -12702,7 +12480,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 ; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB33_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 @@ -12717,7 +12495,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 
v5, v4 @@ -12825,8 +12603,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB34_1: ; %atomicrmw.start @@ -12837,7 +12614,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX10-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -12855,9 +12632,8 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: v_mov_b32_e32 v2, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB34_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -12865,7 +12641,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] ; GFX90A-NEXT: buffer_wbl2 -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_invl2 ; GFX90A-NEXT: buffer_wbinvl1 @@ -12883,9 +12659,8 @@ define 
float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB34_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -12893,7 +12668,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -12910,9 +12685,8 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB34_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -12920,7 +12694,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -12937,9 +12711,8 @@ define float 
@buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX7-NEXT: v_mov_b32_e32 v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB34_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -12947,7 +12720,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX7-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll index f7a1fb3..1a4140c 100644 --- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll +++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll @@ -37,10 +37,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -49,7 +48,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; 
GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -88,10 +87,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -99,7 +97,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5 ; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -116,10 +114,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -128,7 +125,7 @@ define float 
@buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -145,10 +142,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -157,7 +153,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -212,10 +208,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v0, v0 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; 
GFX942-NEXT: s_waitcnt vmcnt(0) @@ -223,7 +218,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: v_max_f32_e32 v0, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -262,17 +257,16 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v0, v0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_max_f32_e32 v0, v1, v1 ; GFX90A-NEXT: v_max_f32_e32 v0, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -289,10 +283,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v0, v0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB1_1: ; %atomicrmw.start 
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -300,7 +293,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_max_f32_e32 v0, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: v_mov_b32_e32 v4, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -317,10 +310,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -328,7 +320,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_max_f32_e32 v0, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: v_mov_b32_e32 v4, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -402,7 +394,6 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX942-NEXT: s_mov_b64 s[2:3], exec ; GFX942-NEXT: .LBB2_1: ; =>This Inner Loop 
Header: Depth=1 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -414,22 +405,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3] ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] -; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024 -; GFX942-NEXT: ; implicit-def: $vgpr4 +; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB2_1 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: s_mov_b64 exec, s[2:3] ; GFX942-NEXT: s_mov_b64 s[2:3], 0 -; GFX942-NEXT: v_max_f32_e32 v9, v5, v5 +; GFX942-NEXT: v_max_f32_e32 v5, v5, v5 ; GFX942-NEXT: .LBB2_3: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Loop Header: Depth=1 ; GFX942-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_max_f32_e32 v4, v7, v7 -; GFX942-NEXT: v_max_f32_e32 v6, v4, v9 +; GFX942-NEXT: v_max_f32_e32 v6, v9, v9 +; GFX942-NEXT: v_max_f32_e32 v8, v6, v5 ; GFX942-NEXT: s_mov_b64 s[8:9], exec -; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7] +; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9] ; GFX942-NEXT: buffer_wbl2 sc1 ; GFX942-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX942-NEXT: ; => This Inner Loop Header: Depth=2 @@ -443,21 +433,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB2_4 ; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX942-NEXT: s_mov_b64 exec, s[8:9] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX942-NEXT: 
v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v7, v4 +; GFX942-NEXT: v_mov_b32_e32 v9, v6 ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB2_3 ; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX942-NEXT: s_or_b64 exec, exec, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v0, v4 +; GFX942-NEXT: v_mov_b32_e32 v0, v6 ; GFX942-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: @@ -522,7 +512,6 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX90A-NEXT: s_mov_b64 s[6:7], exec ; GFX90A-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -534,22 +523,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX90A-NEXT: ; implicit-def: $vgpr4 +; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024 ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB2_1 ; GFX90A-NEXT: ; %bb.2: ; GFX90A-NEXT: s_mov_b64 exec, s[6:7] ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 -; GFX90A-NEXT: v_max_f32_e32 v9, v5, v5 +; GFX90A-NEXT: v_max_f32_e32 v5, v5, v5 ; GFX90A-NEXT: .LBB2_3: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_max_f32_e32 v4, v7, v7 -; GFX90A-NEXT: v_max_f32_e32 v6, v4, v9 +; GFX90A-NEXT: v_max_f32_e32 v6, v9, v9 +; GFX90A-NEXT: 
v_max_f32_e32 v8, v6, v5 ; GFX90A-NEXT: s_mov_b64 s[12:13], exec -; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1] ; GFX90A-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -561,27 +549,26 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB2_4 ; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX90A-NEXT: s_mov_b64 exec, s[12:13] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v7, v4 +; GFX90A-NEXT: v_mov_b32_e32 v9, v6 ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB2_3 ; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v0, v4 +; GFX90A-NEXT: v_mov_b32_e32 v0, v6 ; GFX90A-NEXT: s_setpc_b64 s[30:31] ; ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -593,8 +580,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: 
s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 -; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 +; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB2_1 ; GFX908-NEXT: ; %bb.2: @@ -605,11 +591,11 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: ; =>This Loop Header: Depth=1 ; GFX908-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_max_f32_e32 v4, v6, v6 -; GFX908-NEXT: v_max_f32_e32 v5, v4, v8 -; GFX908-NEXT: v_mov_b32_e32 v4, v5 -; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_max_f32_e32 v5, v7, v7 +; GFX908-NEXT: v_max_f32_e32 v6, v5, v8 ; GFX908-NEXT: v_mov_b32_e32 v5, v6 +; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_mov_b32_e32 v6, v7 ; GFX908-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -621,27 +607,26 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB2_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX908-NEXT: s_mov_b64 exec, s[12:13] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v6, v4 +; GFX908-NEXT: v_mov_b32_e32 v7, v5 ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB2_3 ; 
GFX908-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v0, v4 +; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -653,8 +638,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 -; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 +; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB2_1 ; GFX8-NEXT: ; %bb.2: @@ -665,11 +649,11 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v6 -; GFX8-NEXT: v_max_f32_e32 v5, v4, v8 -; GFX8-NEXT: v_mov_b32_e32 v4, v5 -; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mul_f32_e32 v5, 1.0, v7 +; GFX8-NEXT: v_max_f32_e32 v6, v5, v8 ; GFX8-NEXT: v_mov_b32_e32 v5, v6 +; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -681,21 +665,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap 
v[4:5], v7, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB2_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX8-NEXT: s_mov_b64 exec, s[12:13] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v7, v5 ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB2_3 ; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v0, v4 +; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: @@ -777,10 +761,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -789,7 +772,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX942-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -804,11 +787,10 @@ define float 
@buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_max_f32 v2, v1, v1 -; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_dual_mov_b32 v3, s16 :: v_dual_max_f32 v2, v1, v1 +; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) @@ -819,7 +801,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX11-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -837,11 +819,10 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: v_max_f32_e32 v2, v1, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -851,7 +832,7 @@ define float 
@buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -869,10 +850,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -880,7 +860,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5 ; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -897,10 +877,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This 
Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -909,7 +888,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -926,10 +905,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -938,7 +916,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -955,10 +933,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_mov_b32_e32 v1, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB3_1: ; 
%atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -967,7 +944,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1035,10 +1012,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -1047,7 +1023,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1086,10 +1062,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, 
s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -1097,7 +1072,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5 ; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1114,10 +1089,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -1126,7 +1100,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1143,10 +1117,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, 
s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -1155,7 +1128,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1201,29 +1174,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v2, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] +; GFX12-NEXT: v_mov_b32_e32 v8, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5] -; 
GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1246,30 +1217,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: 
v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1301,30 +1270,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] 
; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB5_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1334,30 +1300,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 
v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v8, s20 ; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB5_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1397,11 +1360,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_mov_b32_e32 v2, s16 ; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1] -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v6, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB6_1: ; 
%atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 @@ -1411,7 +1372,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[4:5] ; GFX12-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2 ; GFX12-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3] @@ -1440,11 +1401,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v2, s16 ; GFX11-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1] -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v6, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) @@ -1454,7 +1413,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5] ; GFX11-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2 ; GFX11-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -1494,9 +1453,8 @@ define void 
@buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v2, s20 ; GFX908-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048 ; GFX908-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v6, s20 ; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -1506,7 +1464,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v9, v2 ; GFX908-NEXT: v_mov_b32_e32 v8, v1 ; GFX908-NEXT: v_mov_b32_e32 v7, v0 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3] @@ -1525,9 +1483,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v2, s20 ; GFX8-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048 ; GFX8-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v6, s20 ; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -1537,7 +1494,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v9, v2 ; GFX8-NEXT: v_mov_b32_e32 v8, v1 ; GFX8-NEXT: v_mov_b32_e32 v7, v0 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3] 
@@ -1583,10 +1540,9 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 ; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 -; GFX12-NEXT: v_add_nc_u32_e32 v15, 0x800, v4 ; GFX12-NEXT: s_mov_b32 s1, exec_lo ; GFX12-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX12-NEXT: v_readfirstlane_b32 s4, v9 ; GFX12-NEXT: v_readfirstlane_b32 s5, v10 ; GFX12-NEXT: v_readfirstlane_b32 s6, v7 @@ -1600,12 +1556,11 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], null offen offset:2048 -; GFX12-NEXT: ; implicit-def: $vgpr4 ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB7_1 ; GFX12-NEXT: ; %bb.2: ; GFX12-NEXT: s_mov_b32 exec_lo, s1 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[5:6], v[5:6] +; GFX12-NEXT: v_max_num_f64_e32 v[5:6], v[5:6], v[5:6] ; GFX12-NEXT: s_mov_b32 s1, 0 ; GFX12-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Loop Header: Depth=1 @@ -1615,7 +1570,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_mov_b32 s2, exec_lo ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[11:12], v[0:1], v[4:5] +; GFX12-NEXT: v_max_num_f64_e32 v[11:12], v[0:1], v[5:6] ; GFX12-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12 ; GFX12-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14 ; GFX12-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1 @@ -1632,7 +1587,7 @@ define double 
@buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB7_4 ; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -1686,27 +1641,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 ; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 -; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x800, v4 ; GFX11-NEXT: s_mov_b32 s1, 0 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3) +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_readfirstlane_b32 s4, v9 ; GFX11-NEXT: v_readfirstlane_b32 s5, v10 ; GFX11-NEXT: v_readfirstlane_b32 s6, v7 ; GFX11-NEXT: v_readfirstlane_b32 s7, v8 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[9:10] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[7:8] ; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], 0 offen offset:2048 -; GFX11-NEXT: ; implicit-def: $vgpr4 ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB7_1 ; GFX11-NEXT: ; %bb.2: ; GFX11-NEXT: s_mov_b32 exec_lo, s2 -; GFX11-NEXT: v_max_f64 v[4:5], 
v[5:6], v[5:6] +; GFX11-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6] ; GFX11-NEXT: .p2align 6 ; GFX11-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Loop Header: Depth=1 @@ -1716,7 +1670,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[11:12], v[0:1], v[4:5] +; GFX11-NEXT: v_max_f64 v[11:12], v[0:1], v[5:6] ; GFX11-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12 ; GFX11-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14 ; GFX11-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1 @@ -1732,7 +1686,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], 0 offen offset:2048 glc ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB7_4 ; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -1816,7 +1770,6 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: v_mov_b32_e32 v7, v2 ; GFX908-NEXT: v_mov_b32_e32 v10, v1 ; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_add_u32_e32 v15, 0x800, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v9 @@ -1829,12 +1782,11 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 ; GFX908-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX908-NEXT: ; implicit-def: $vgpr4 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: 
s_cbranch_execnz .LBB7_1 ; GFX908-NEXT: ; %bb.2: ; GFX908-NEXT: s_mov_b64 exec, s[6:7] -; GFX908-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6] +; GFX908-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6] ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Loop Header: Depth=1 @@ -1842,7 +1794,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14] ; GFX908-NEXT: s_mov_b64 s[12:13], exec -; GFX908-NEXT: v_max_f64 v[11:12], v[0:1], v[4:5] +; GFX908-NEXT: v_max_f64 v[11:12], v[0:1], v[5:6] ; GFX908-NEXT: v_mov_b32_e32 v0, v11 ; GFX908-NEXT: v_mov_b32_e32 v1, v12 ; GFX908-NEXT: v_mov_b32_e32 v2, v13 @@ -1858,7 +1810,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB7_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -1882,7 +1834,6 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: v_mov_b32_e32 v7, v2 ; GFX8-NEXT: v_mov_b32_e32 v10, v1 ; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_add_u32_e32 v15, vcc, 0x800, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v9 @@ -1895,12 +1846,11 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 ; GFX8-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX8-NEXT: ; implicit-def: $vgpr4 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; 
GFX8-NEXT: s_cbranch_execnz .LBB7_1 ; GFX8-NEXT: ; %bb.2: ; GFX8-NEXT: s_mov_b64 exec, s[6:7] -; GFX8-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6] +; GFX8-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6] ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Loop Header: Depth=1 @@ -1908,7 +1858,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14] ; GFX8-NEXT: s_mov_b64 s[12:13], exec -; GFX8-NEXT: v_max_f64 v[11:12], v[0:1], v[4:5] +; GFX8-NEXT: v_max_f64 v[11:12], v[0:1], v[5:6] ; GFX8-NEXT: v_mov_b32_e32 v0, v11 ; GFX8-NEXT: v_mov_b32_e32 v1, v12 ; GFX8-NEXT: v_mov_b32_e32 v2, v13 @@ -1924,7 +1874,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB7_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -2008,29 +1958,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v2, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] +; GFX12-NEXT: v_mov_b32_e32 v8, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, 
s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2053,30 +2001,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: 
v_mov_b32_e32 v6, s4 -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2088,31 +2034,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory: ; GFX10: ; %bb.0: ; 
GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v2, v0 -; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: v_mov_b32_e32 v3, v1 -; GFX10-NEXT: s_add_i32 s4, s20, 0x800 -; GFX10-NEXT: v_mov_b32_e32 v6, s4 -; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX10-NEXT: v_mov_b32_e32 v2, s20 +; GFX10-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX10-NEXT: v_mov_b32_e32 v8, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v10, v1 -; GFX10-NEXT: v_mov_b32_e32 v9, v0 +; GFX10-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX10-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX10-NEXT: v_mov_b32_e32 v0, v7 -; GFX10-NEXT: v_mov_b32_e32 v1, v8 -; GFX10-NEXT: v_mov_b32_e32 v2, v9 -; GFX10-NEXT: v_mov_b32_e32 v3, v10 -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX10-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX10-NEXT: v_mov_b32_e32 v0, v2 +; GFX10-NEXT: v_mov_b32_e32 v1, v3 +; GFX10-NEXT: v_mov_b32_e32 v2, v4 +; GFX10-NEXT: v_mov_b32_e32 v3, v5 +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX10-NEXT: v_mov_b32_e32 v5, v1 +; GFX10-NEXT: v_mov_b32_e32 v4, v0 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB8_1 @@ -2123,27 +2066,24 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX90A-LABEL: 
buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_mov_b32_e32 v2, v0 -; GFX90A-NEXT: v_mov_b32_e32 v0, s20 -; GFX90A-NEXT: v_mov_b32_e32 v3, v1 -; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x800 +; GFX90A-NEXT: v_mov_b32_e32 v2, s20 +; GFX90A-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX90A-NEXT: v_mov_b32_e32 v6, s6 +; GFX90A-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX90A-NEXT: v_mov_b32_e32 v8, s20 ; GFX90A-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11] -; GFX90A-NEXT: v_max_f64 v[8:9], v[0:1], v[4:5] -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1] -; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX90A-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX90A-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1] +; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 -; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1] ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB8_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2153,30 +2093,27 @@ define double 
@buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: 
s_cbranch_execnz .LBB8_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2186,30 +2123,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v8, s20 ; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, 
s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB8_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2219,30 +2153,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v2, v0 -; GFX7-NEXT: v_mov_b32_e32 v0, s20 -; GFX7-NEXT: v_mov_b32_e32 v3, v1 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX7-NEXT: s_add_i32 s6, s20, 0x800 -; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX7-NEXT: v_mov_b32_e32 v2, s20 +; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v6, s6 +; GFX7-NEXT: v_mov_b32_e32 v8, s20 ; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v10, v1 -; GFX7-NEXT: v_mov_b32_e32 v9, v0 -; GFX7-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX7-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v8 -; GFX7-NEXT: v_mov_b32_e32 v2, v9 -; GFX7-NEXT: v_mov_b32_e32 v3, v10 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX7-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX7-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX7-NEXT: v_mov_b32_e32 v0, v2 +; GFX7-NEXT: v_mov_b32_e32 v1, v3 +; GFX7-NEXT: v_mov_b32_e32 v2, v4 +; GFX7-NEXT: v_mov_b32_e32 v3, v5 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v5, v1 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX7-NEXT: v_mov_b32_e32 v4, v0 ; GFX7-NEXT: 
s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB8_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2252,31 +2183,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v2, v0 -; GFX6-NEXT: v_mov_b32_e32 v0, s20 -; GFX6-NEXT: v_mov_b32_e32 v3, v1 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX6-NEXT: v_mov_b32_e32 v2, s20 +; GFX6-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800 -; GFX6-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX6-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX6-NEXT: s_mov_b64 s[4:5], 0 -; GFX6-NEXT: v_mov_b32_e32 v6, s6 +; GFX6-NEXT: v_mov_b32_e32 v8, s6 ; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v10, v1 -; GFX6-NEXT: v_mov_b32_e32 v9, v0 -; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX6-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX6-NEXT: v_mov_b32_e32 v0, v7 -; GFX6-NEXT: v_mov_b32_e32 v1, v8 -; GFX6-NEXT: v_mov_b32_e32 v2, v9 -; GFX6-NEXT: v_mov_b32_e32 v3, v10 -; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; GFX6-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX6-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX6-NEXT: v_mov_b32_e32 v0, v2 +; GFX6-NEXT: v_mov_b32_e32 v1, v3 +; GFX6-NEXT: v_mov_b32_e32 v2, v4 +; GFX6-NEXT: v_mov_b32_e32 v3, v5 +; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX6-NEXT: v_mov_b32_e32 v5, v1 ; GFX6-NEXT: 
s_or_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_mov_b32_e32 v4, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB8_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2296,29 +2224,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v2, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] +; GFX12-NEXT: v_mov_b32_e32 v8, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, 
s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2341,30 +2267,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 
v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2396,30 +2320,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: 
v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB9_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2429,30 +2350,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v8, s20 ; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] 
-; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB9_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6146,13 +6064,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v3, s4 +; GFX12-NEXT: v_mov_b32_e32 v3, s16 +; GFX12-NEXT: s_mov_b32 s4, 0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX12-NEXT: v_pk_max_num_f16 v2, v1, v1 ; GFX12-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024 -; GFX12-NEXT: s_mov_b32 s4, 0 ; GFX12-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 @@ -6163,7 +6079,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX12-NEXT: v_pk_max_num_f16 v4, v0, v2 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen th:TH_ATOMIC_RETURN +; 
GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5 @@ -6182,10 +6098,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_pk_max_f16 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -6195,7 +6110,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX942-NEXT: v_pk_max_f16 v4, v0, v2 ; GFX942-NEXT: s_nop 0 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -6210,12 +6125,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_mov_b32_e32 v3, s4 +; GFX11-NEXT: v_mov_b32_e32 v3, s16 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_pk_max_f16 v2, v1, v1 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 -; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: 
Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) @@ -6226,7 +6140,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX11-NEXT: v_pk_max_f16 v4, v0, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -6244,11 +6158,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: v_pk_max_f16 v2, v1, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -6258,7 +6171,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: v_pk_max_f16 v4, v0, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -6276,10 +6189,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_pk_max_f16 v2, v1, v1 -; 
GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -6287,7 +6199,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX90A-NEXT: v_pk_max_f16 v0, v5, v5 ; GFX90A-NEXT: v_pk_max_f16 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -6304,10 +6216,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_pk_max_f16 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -6316,7 +6227,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_pk_max_f16 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -6333,11 +6244,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_max_f16_sdwa v2, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 ; GFX8-NEXT: v_max_f16_e32 v3, v1, v1 -; GFX8-NEXT: v_mov_b32_e32 v4, s6 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -6349,7 +6259,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_or_b32_e32 v5, v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: v_mov_b32_e32 v1, v6 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -6367,7 +6277,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -6375,7 +6284,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -6392,7 +6301,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_or_b32_e32 v5, v7, v0 ; GFX7-NEXT: v_mov_b32_e32 v8, v6 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap 
v[7:8], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7 @@ -6467,10 +6376,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_mov_b32_e32 v1, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400 ; GFX12-NEXT: v_pk_max_num_f16 v2, v0, v0 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v3, s4 +; GFX12-NEXT: v_mov_b32_e32 v3, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 ; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 ; GFX12-NEXT: .LBB17_1: ; %atomicrmw.start @@ -6481,7 +6388,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v2 ; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 -; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1 @@ -6500,10 +6407,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -6512,7 +6418,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX942-NEXT: 
v_pk_max_f16 v0, v0, v2 ; GFX942-NEXT: s_nop 0 ; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -6528,9 +6434,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v1, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 ; GFX11-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX11-NEXT: v_mov_b32_e32 v3, s4 +; GFX11-NEXT: v_mov_b32_e32 v3, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start @@ -6541,7 +6446,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_pk_max_f16 v0, v0, v2 ; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -6559,9 +6464,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start @@ -6572,7 +6476,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; 
GFX10-NEXT: v_pk_max_f16 v0, v0, v2 ; GFX10-NEXT: v_mov_b32_e32 v5, v1 ; GFX10-NEXT: v_mov_b32_e32 v4, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -6590,17 +6494,16 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_pk_max_f16 v0, v1, v1 ; GFX90A-NEXT: v_pk_max_f16 v0, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -6617,10 +6520,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -6628,7 +6530,7 @@ define void 
@buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX908-NEXT: v_pk_max_f16 v0, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: v_mov_b32_e32 v4, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -6645,11 +6547,10 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_max_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 ; GFX8-NEXT: v_max_f16_e32 v3, v0, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s6 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -6660,7 +6561,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX8-NEXT: v_or_b32_e32 v0, v5, v0 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -6679,7 +6580,6 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -6687,7 +6587,7 @@ define 
void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2 ; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 @@ -6704,7 +6604,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: v_or_b32_e32 v4, v6, v3 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6 @@ -6778,7 +6678,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX12-NEXT: s_mov_b32 s1, exec_lo ; GFX12-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: v_readfirstlane_b32 s4, v0 @@ -6793,8 +6692,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024 -; GFX12-NEXT: ; implicit-def: $vgpr4 +; GFX12-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024 ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB18_1 ; GFX12-NEXT: ; %bb.2: @@ -6805,13 +6703,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX12-NEXT: ; =>This Loop Header: Depth=1 ; GFX12-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_pk_max_num_f16 v4, v6, v6 +; 
GFX12-NEXT: v_pk_max_num_f16 v5, v7, v7 ; GFX12-NEXT: s_mov_b32 s2, exec_lo ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_pk_max_num_f16 v5, v4, v8 -; GFX12-NEXT: v_mov_b32_e32 v4, v5 +; GFX12-NEXT: v_pk_max_num_f16 v6, v5, v8 ; GFX12-NEXT: v_mov_b32_e32 v5, v6 +; GFX12-NEXT: v_mov_b32_e32 v6, v7 ; GFX12-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX12-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX12-NEXT: v_readfirstlane_b32 s4, v0 @@ -6826,14 +6724,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB18_4 ; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX12-NEXT: s_mov_b32 exec_lo, s2 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX12-NEXT: v_mov_b32_e32 v6, v4 +; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX12-NEXT: v_mov_b32_e32 v7, v5 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX12-NEXT: s_wait_alu 0xfffe @@ -6841,14 +6739,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_cbranch_execnz .LBB18_3 ; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX12-NEXT: v_mov_b32_e32 v0, v4 +; GFX12-NEXT: v_mov_b32_e32 v0, v5 ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; ; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: 
v_add_u32_e32 v8, 0x400, v4 ; GFX942-NEXT: s_mov_b64 s[2:3], exec ; GFX942-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -6860,23 +6757,22 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3] ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] -; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024 -; GFX942-NEXT: ; implicit-def: $vgpr4 +; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB18_1 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: s_mov_b64 exec, s[2:3] ; GFX942-NEXT: s_mov_b64 s[2:3], 0 -; GFX942-NEXT: v_pk_max_f16 v9, v5, v5 +; GFX942-NEXT: v_pk_max_f16 v5, v5, v5 ; GFX942-NEXT: .LBB18_3: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Loop Header: Depth=1 ; GFX942-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_pk_max_f16 v4, v7, v7 +; GFX942-NEXT: v_pk_max_f16 v6, v9, v9 ; GFX942-NEXT: s_mov_b64 s[8:9], exec -; GFX942-NEXT: v_pk_max_f16 v6, v4, v9 +; GFX942-NEXT: v_pk_max_f16 v8, v6, v5 ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7] +; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9] ; GFX942-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX942-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -6889,27 +6785,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB18_4 ; GFX942-NEXT: ; %bb.5: ; in 
Loop: Header=BB18_3 Depth=1 ; GFX942-NEXT: s_mov_b64 exec, s[8:9] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v7, v4 +; GFX942-NEXT: v_mov_b32_e32 v9, v6 ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB18_3 ; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX942-NEXT: s_or_b64 exec, exec, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v0, v4 +; GFX942-NEXT: v_mov_b32_e32 v0, v6 ; GFX942-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-NEXT: s_mov_b32 s1, 0 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 @@ -6923,8 +6818,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-NEXT: ; implicit-def: $vgpr4 +; GFX11-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB18_1 ; GFX11-NEXT: ; %bb.2: @@ -6935,13 +6829,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX11-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_pk_max_f16 v4, v6, v6 +; GFX11-NEXT: v_pk_max_f16 v5, v7, v7 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_pk_max_f16 v5, v4, v8 -; 
GFX11-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-NEXT: v_pk_max_f16 v6, v5, v8 ; GFX11-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX11-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-NEXT: v_readfirstlane_b32 s4, v0 @@ -6955,14 +6849,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB18_4 ; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX11-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv ; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -6971,13 +6865,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_cbranch_execnz .LBB18_3 ; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 @@ -6989,8 +6882,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3] ; 
GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 -; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX10-NEXT: ; implicit-def: $vgpr4 +; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB18_1 @@ -7001,12 +6893,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_pk_max_f16 v4, v6, v6 +; GFX10-NEXT: v_pk_max_f16 v5, v7, v7 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_pk_max_f16 v5, v4, v8 -; GFX10-NEXT: v_mov_b32_e32 v4, v5 +; GFX10-NEXT: v_pk_max_f16 v6, v5, v8 ; GFX10-NEXT: v_mov_b32_e32 v5, v6 +; GFX10-NEXT: v_mov_b32_e32 v6, v7 ; GFX10-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX10-NEXT: v_readfirstlane_b32 s8, v0 @@ -7018,15 +6910,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB18_4 ; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX10-NEXT: s_mov_b32 exec_lo, s6 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX10-NEXT: v_mov_b32_e32 v7, v5 ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 @@ -7035,13 +6927,12 @@ define <2 
x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX10-NEXT: s_cbranch_execnz .LBB18_3 ; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: v_mov_b32_e32 v0, v4 +; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX90A-NEXT: s_mov_b64 s[6:7], exec ; GFX90A-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -7053,22 +6944,21 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX90A-NEXT: ; implicit-def: $vgpr4 +; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024 ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB18_1 ; GFX90A-NEXT: ; %bb.2: ; GFX90A-NEXT: s_mov_b64 exec, s[6:7] ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 -; GFX90A-NEXT: v_pk_max_f16 v9, v5, v5 +; GFX90A-NEXT: v_pk_max_f16 v5, v5, v5 ; GFX90A-NEXT: .LBB18_3: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_pk_max_f16 v4, v7, v7 -; GFX90A-NEXT: v_pk_max_f16 v6, v4, v9 +; GFX90A-NEXT: v_pk_max_f16 v6, v9, v9 +; GFX90A-NEXT: v_pk_max_f16 v8, v6, v5 ; GFX90A-NEXT: s_mov_b64 s[12:13], exec -; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1] ; GFX90A-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -7080,27 
+6970,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB18_4 ; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX90A-NEXT: s_mov_b64 exec, s[12:13] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v7, v4 +; GFX90A-NEXT: v_mov_b32_e32 v9, v6 ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB18_3 ; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v0, v4 +; GFX90A-NEXT: v_mov_b32_e32 v0, v6 ; GFX90A-NEXT: s_setpc_b64 s[30:31] ; ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -7112,8 +7001,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 -; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 +; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB18_1 ; GFX908-NEXT: ; %bb.2: @@ 
-7124,11 +7012,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX908-NEXT: ; =>This Loop Header: Depth=1 ; GFX908-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_pk_max_f16 v4, v6, v6 -; GFX908-NEXT: v_pk_max_f16 v5, v4, v8 -; GFX908-NEXT: v_mov_b32_e32 v4, v5 -; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_pk_max_f16 v5, v7, v7 +; GFX908-NEXT: v_pk_max_f16 v6, v5, v8 ; GFX908-NEXT: v_mov_b32_e32 v5, v6 +; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_mov_b32_e32 v6, v7 ; GFX908-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -7140,27 +7028,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB18_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX908-NEXT: s_mov_b64 exec, s[12:13] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v6, v4 +; GFX908-NEXT: v_mov_b32_e32 v7, v5 ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB18_3 ; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v0, v4 +; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -7172,8 +7059,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 -; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 +; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB18_1 ; GFX8-NEXT: ; %bb.2: @@ -7185,14 +7071,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_max_f16_sdwa v4, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_max_f16_e32 v5, v6, v6 -; GFX8-NEXT: v_max_f16_sdwa v4, v4, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX8-NEXT: v_max_f16_e32 v5, v5, v9 -; GFX8-NEXT: v_or_b32_e32 v5, v5, v4 -; GFX8-NEXT: v_mov_b32_e32 v4, v5 -; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_max_f16_sdwa v5, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-NEXT: v_max_f16_e32 v6, v7, v7 +; GFX8-NEXT: v_max_f16_sdwa v5, v5, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-NEXT: v_max_f16_e32 v6, v6, v9 +; GFX8-NEXT: v_or_b32_e32 v6, v6, v5 ; GFX8-NEXT: v_mov_b32_e32 v5, v6 +; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -7204,27 +7090,26 @@ define <2 x half> 
@buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB18_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX8-NEXT: s_mov_b64 exec, s[12:13] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v7, v5 ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB18_3 ; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v0, v4 +; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -7235,39 +7120,38 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3] ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] -; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX7-NEXT: ; implicit-def: $vgpr4 +; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB18_1 ; GFX7-NEXT: ; %bb.2: ; GFX7-NEXT: s_mov_b64 exec, s[6:7] ; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6 
-; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v5 +; GFX7-NEXT: v_cvt_f16_f32_e32 v9, v5 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8 ; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5 ; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v6 -; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v9 ; GFX7-NEXT: s_mov_b64 s[6:7], 0 ; GFX7-NEXT: .LBB18_3: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Loop Header: Depth=1 ; GFX7-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5 -; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 +; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v7 ; GFX7-NEXT: s_mov_b64 s[12:13], exec -; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5 -; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v4 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v5 +; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v6 ; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX7-NEXT: v_max_f32_e32 v6, v6, v10 -; GFX7-NEXT: v_max_f32_e32 v7, v7, v11 -; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v6 +; GFX7-NEXT: v_or_b32_e32 v6, v6, v5 +; GFX7-NEXT: v_max_f32_e32 v7, v7, v10 +; GFX7-NEXT: v_max_f32_e32 v8, v8, v11 ; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7 -; GFX7-NEXT: v_or_b32_e32 v6, v4, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; GFX7-NEXT: v_or_b32_e32 v5, v7, v4 -; GFX7-NEXT: v_mov_b32_e32 v8, v6 -; GFX7-NEXT: v_mov_b32_e32 v7, v5 +; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX7-NEXT: v_or_b32_e32 v5, v8, v5 +; GFX7-NEXT: v_mov_b32_e32 v9, v6 +; GFX7-NEXT: v_mov_b32_e32 v8, v5 ; GFX7-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX7-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -7279,23 +7163,23 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: 
buffer_atomic_cmpswap v[7:8], v9, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB18_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX7-NEXT: s_mov_b64 exec, s[12:13] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8 ; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v6 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6 ; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7] ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX7-NEXT: s_cbranch_execnz .LBB18_3 ; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v4 +; GFX7-NEXT: v_mov_b32_e32 v0, v7 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -7396,13 +7280,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe +; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1 +; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1 ; GFX12-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024 ; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 @@ -7431,7 +7313,7 @@ define <2 x bfloat> 
@buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6 @@ -7452,11 +7334,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400 ; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0 -; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1 +; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1 ; GFX12-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024 ; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start @@ -7487,7 +7367,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX12-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-FAKE16-NEXT: 
v_cmp_eq_u32_e32 vcc_lo, v0, v6 @@ -7506,13 +7386,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -7534,7 +7413,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5] ; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7] -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -7549,12 +7428,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1 +; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1 ; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; 
GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start @@ -7583,7 +7461,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -7601,10 +7479,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1 ; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 @@ -7635,7 +7512,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; 
GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -7654,9 +7531,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 @@ -7682,7 +7558,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: v_mov_b32_e32 v1, v6 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -7700,13 +7576,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -7727,7 +7602,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], 
v[6:7] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -7744,13 +7619,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -7772,7 +7646,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: v_mov_b32_e32 v1, v6 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -7789,11 +7663,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This 
Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -7818,7 +7691,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: v_mov_b32_e32 v1, v6 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -7834,7 +7707,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -7843,7 +7715,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 @@ -7858,7 +7730,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: v_alignbit_b32 v0, v0, v6, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v1 ; GFX7-NEXT: v_mov_b32_e32 v5, v0 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -7928,11 +7800,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-TRUE16-NEXT: 
s_wait_kmcnt 0x0 ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe -; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 +; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 ; GFX12-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 @@ -7958,7 +7828,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1 @@ -7980,11 +7850,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe -; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 +; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen 
offset:1024 ; GFX12-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 @@ -8010,7 +7878,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX12-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1 @@ -8029,13 +7897,12 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -8056,7 +7923,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] ; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; 
GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -8072,11 +7939,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start @@ -8102,7 +7967,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -8121,11 +7986,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen 
offset:1024 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start @@ -8151,7 +8014,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -8170,12 +8033,11 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 -; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -8197,7 +8059,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v6, v1 ; GFX10-NEXT: v_mov_b32_e32 v5, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, 
s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -8215,13 +8077,12 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -8241,7 +8102,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -8258,13 +8119,12 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB20_1: ; %atomicrmw.start ; 
GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -8285,7 +8145,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v6, v1 ; GFX908-NEXT: v_mov_b32_e32 v5, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -8302,11 +8162,10 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -8330,7 +8189,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -8347,7 +8206,6 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: 
v_mul_f32_e32 v3, 1.0, v0 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -8356,7 +8214,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v2 ; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 @@ -8371,7 +8229,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: v_alignbit_b32 v3, v3, v6, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4 @@ -8440,7 +8298,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0 ; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo ; GFX12-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8455,8 +8312,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024 -; GFX12-TRUE16-NEXT: ; implicit-def: $vgpr4 +; GFX12-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024 ; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX12-TRUE16-NEXT: ; %bb.2: @@ -8468,30 +8324,30 @@ define <2 x bfloat> 
@buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v5, v5, v8 :: v_dual_max_num_f32 v4, v4, v9 -; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v6, v6, v8 :: v_dual_max_num_f32 v5, v5, v9 +; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff +; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3) -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo -; 
GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v5 ; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX12-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8506,14 +8362,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -8521,7 +8377,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_3 ; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -8532,7 +8388,6 @@ define <2 x bfloat> 
@buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo ; GFX12-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8547,8 +8402,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024 -; GFX12-FAKE16-NEXT: ; implicit-def: $vgpr4 +; GFX12-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024 ; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX12-FAKE16-NEXT: ; %bb.2: @@ -8560,30 +8414,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v5, v5, v9 :: v_dual_max_num_f32 v4, v4, v8 -; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v6, v6, v9 :: v_dual_max_num_f32 v5, v5, v8 +; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff +; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo -; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo +; GFX12-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5 ; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX12-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8598,14 +8452,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 
; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -8613,14 +8467,13 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_3 ; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX942-NEXT: s_mov_b64 s[2:3], exec ; GFX942-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -8632,40 +8485,39 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3] ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] -; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024 -; GFX942-NEXT: ; implicit-def: $vgpr4 +; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB21_1 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: s_mov_b64 exec, s[2:3] ; GFX942-NEXT: s_mov_b64 s[2:3], 0 -; GFX942-NEXT: v_lshlrev_b32_e32 v9, 16, v5 +; GFX942-NEXT: 
v_lshlrev_b32_e32 v10, 16, v5 ; GFX942-NEXT: s_movk_i32 s10, 0x7fff -; GFX942-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX942-NEXT: s_mov_b32 s11, 0x7060302 ; GFX942-NEXT: .LBB21_3: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Loop Header: Depth=1 ; GFX942-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v7 -; GFX942-NEXT: v_max_f32_e32 v4, v4, v9 -; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX942-NEXT: v_add3_u32 v5, v5, v4, s10 -; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX942-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX942-NEXT: v_max_f32_e32 v6, v6, v10 +; GFX942-NEXT: v_bfe_u32 v7, v6, 16, 1 +; GFX942-NEXT: v_add3_u32 v7, v7, v6, s10 +; GFX942-NEXT: v_or_b32_e32 v8, 0x400000, v6 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 ; GFX942-NEXT: s_mov_b64 s[8:9], exec ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v7 -; GFX942-NEXT: v_max_f32_e32 v5, v5, v10 -; GFX942-NEXT: v_bfe_u32 v6, v5, 16, 1 -; GFX942-NEXT: v_add3_u32 v6, v6, v5, s10 -; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX942-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc +; GFX942-NEXT: v_and_b32_e32 v7, 0xffff0000, v9 +; GFX942-NEXT: v_max_f32_e32 v7, v7, v5 +; GFX942-NEXT: v_bfe_u32 v8, v7, 16, 1 +; GFX942-NEXT: v_add3_u32 v8, v8, v7, s10 +; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 ; GFX942-NEXT: s_nop 1 -; GFX942-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc -; GFX942-NEXT: v_perm_b32 v6, v5, v4, s11 -; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7] +; GFX942-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc +; GFX942-NEXT: v_perm_b32 v8, v7, v6, s11 +; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9] ; GFX942-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX942-NEXT: ; => This Inner Loop Header: Depth=2 ; 
GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -8678,27 +8530,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB21_4 ; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX942-NEXT: s_mov_b64 exec, s[8:9] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v7, v4 +; GFX942-NEXT: v_mov_b32_e32 v9, v6 ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB21_3 ; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX942-NEXT: s_or_b64 exec, exec, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v0, v4 +; GFX942-NEXT: v_mov_b32_e32 v0, v6 ; GFX942-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0 ; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -8712,8 +8563,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4 +; GFX11-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen 
offset:1024 ; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX11-TRUE16-NEXT: ; %bb.2: @@ -8726,28 +8576,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v8 :: v_dual_max_f32 v4, v4, v9 -; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11-TRUE16-NEXT: v_dual_max_f32 v6, v6, v8 :: v_dual_max_f32 v5, v5, v9 +; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo +; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | 
instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h ; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8761,14 +8611,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv ; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -8778,13 +8628,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v5 ; 
GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0 ; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -8798,8 +8647,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4 +; GFX11-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX11-FAKE16-NEXT: ; %bb.2: @@ -8812,28 +8660,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_max_f32 v5, v5, v9 :: v_dual_max_f32 v4, v4, v8 -; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11-FAKE16-NEXT: v_dual_max_f32 v6, v6, v9 :: v_dual_max_f32 v5, v5, v8 +; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 
16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo +; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8847,14 +8695,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; 
GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv ; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -8864,13 +8712,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2 ; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -8882,8 +8729,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3] ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 -; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX10-NEXT: ; implicit-def: $vgpr4 +; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB21_1 @@ -8895,25 +8741,25 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; 
Child Loop BB21_4 Depth 2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_max_f32_e32 v4, v4, v8 -; GFX10-NEXT: v_max_f32_e32 v5, v5, v9 -; GFX10-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX10-NEXT: v_bfe_u32 v11, v5, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX10-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX10-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX10-NEXT: v_max_f32_e32 v5, v5, v8 +; GFX10-NEXT: v_max_f32_e32 v6, v6, v9 +; GFX10-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX10-NEXT: v_bfe_u32 v11, v6, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX10-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo -; GFX10-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 -; GFX10-NEXT: v_mov_b32_e32 v4, v5 +; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX10-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX10-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX10-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo +; GFX10-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v5, v6 +; GFX10-NEXT: v_mov_b32_e32 v6, v7 ; GFX10-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX10-NEXT: v_readfirstlane_b32 s8, v0 @@ -8925,15 +8771,15 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc 
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB21_4 ; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX10-NEXT: s_mov_b32 exec_lo, s6 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX10-NEXT: v_mov_b32_e32 v7, v5 ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 @@ -8942,13 +8788,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX10-NEXT: s_cbranch_execnz .LBB21_3 ; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: v_mov_b32_e32 v0, v4 +; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX90A-NEXT: s_mov_b64 s[6:7], exec ; GFX90A-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -8960,38 +8805,37 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX90A-NEXT: ; implicit-def: $vgpr4 +; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024 ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB21_1 ; GFX90A-NEXT: ; %bb.2: ; GFX90A-NEXT: s_mov_b64 exec, s[6:7] ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 -; GFX90A-NEXT: v_lshlrev_b32_e32 v9, 16, v5 +; GFX90A-NEXT: v_lshlrev_b32_e32 v10, 16, v5 ; GFX90A-NEXT: 
s_movk_i32 s14, 0x7fff -; GFX90A-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX90A-NEXT: s_mov_b32 s15, 0x7060302 ; GFX90A-NEXT: .LBB21_3: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v7 -; GFX90A-NEXT: v_max_f32_e32 v4, v4, v9 -; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s14 -; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v7 -; GFX90A-NEXT: v_max_f32_e32 v5, v5, v10 -; GFX90A-NEXT: v_bfe_u32 v6, v5, 16, 1 -; GFX90A-NEXT: v_add3_u32 v6, v6, v5, s14 -; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX90A-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc -; GFX90A-NEXT: v_perm_b32 v6, v5, v4, s15 +; GFX90A-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX90A-NEXT: v_max_f32_e32 v6, v6, v10 +; GFX90A-NEXT: v_bfe_u32 v7, v6, 16, 1 +; GFX90A-NEXT: v_add3_u32 v7, v7, v6, s14 +; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v6 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX90A-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc +; GFX90A-NEXT: v_and_b32_e32 v7, 0xffff0000, v9 +; GFX90A-NEXT: v_max_f32_e32 v7, v7, v5 +; GFX90A-NEXT: v_bfe_u32 v8, v7, 16, 1 +; GFX90A-NEXT: v_add3_u32 v8, v8, v7, s14 +; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX90A-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc +; GFX90A-NEXT: v_perm_b32 v8, v7, v6, s15 ; GFX90A-NEXT: s_mov_b64 s[12:13], exec -; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1] ; GFX90A-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -9003,27 
+8847,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB21_4 ; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX90A-NEXT: s_mov_b64 exec, s[12:13] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v7, v4 +; GFX90A-NEXT: v_mov_b32_e32 v9, v6 ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB21_3 ; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v0, v4 +; GFX90A-NEXT: v_mov_b32_e32 v0, v6 ; GFX90A-NEXT: s_setpc_b64 s[30:31] ; ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -9035,8 +8878,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 -; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 +; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB21_1 ; GFX908-NEXT: ; %bb.2: @@ 
-9050,24 +8892,24 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX908-NEXT: ; =>This Loop Header: Depth=1 ; GFX908-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX908-NEXT: v_max_f32_e32 v4, v4, v8 -; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX908-NEXT: v_add3_u32 v5, v5, v4, s14 -; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v4 -; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc -; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX908-NEXT: v_max_f32_e32 v5, v5, v9 -; GFX908-NEXT: v_bfe_u32 v10, v5, 16, 1 -; GFX908-NEXT: v_add3_u32 v10, v10, v5, s14 -; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v5 +; GFX908-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX908-NEXT: v_max_f32_e32 v5, v5, v8 +; GFX908-NEXT: v_bfe_u32 v6, v5, 16, 1 +; GFX908-NEXT: v_add3_u32 v6, v6, v5, s14 +; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v5 ; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX908-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc -; GFX908-NEXT: v_perm_b32 v5, v5, v4, s15 -; GFX908-NEXT: v_mov_b32_e32 v4, v5 -; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc +; GFX908-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX908-NEXT: v_max_f32_e32 v6, v6, v9 +; GFX908-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX908-NEXT: v_add3_u32 v10, v10, v6, s14 +; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX908-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX908-NEXT: v_perm_b32 v6, v6, v5, s15 ; GFX908-NEXT: v_mov_b32_e32 v5, v6 +; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_mov_b32_e32 v6, v7 ; GFX908-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -9079,27 +8921,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX908-NEXT: s_and_b64 
s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB21_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX908-NEXT: s_mov_b64 exec, s[12:13] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v6, v4 +; GFX908-NEXT: v_mov_b32_e32 v7, v5 ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB21_3 ; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v0, v4 +; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -9111,8 +8952,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 -; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 +; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB21_1 ; GFX8-NEXT: ; %bb.2: @@ -9124,27 +8964,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; 
GFX8-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX8-NEXT: v_max_f32_e32 v4, v4, v8 -; GFX8-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v4 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5 -; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc -; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX8-NEXT: v_max_f32_e32 v5, v5, v9 -; GFX8-NEXT: v_bfe_u32 v10, v5, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v5 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10 -; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX8-NEXT: v_max_f32_e32 v5, v5, v8 +; GFX8-NEXT: v_bfe_u32 v6, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v5 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; GFX8-NEXT: v_alignbit_b32 v5, v5, v4, 16 -; GFX8-NEXT: v_mov_b32_e32 v4, v5 -; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX8-NEXT: v_max_f32_e32 v6, v6, v9 +; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX8-NEXT: v_alignbit_b32 v6, v6, v5, 16 ; GFX8-NEXT: v_mov_b32_e32 v5, v6 +; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -9156,27 +8996,26 @@ define <2 x bfloat> 
@buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB21_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX8-NEXT: s_mov_b64 exec, s[12:13] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v7, v5 ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB21_3 ; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v0, v4 +; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v8, vcc, 0x400, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -9187,8 +9026,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3] ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] -; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX7-NEXT: ; implicit-def: $vgpr4 +; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB21_1 ; GFX7-NEXT: ; %bb.2: @@ -9196,27 +9034,27 @@ define <2 x bfloat> 
@buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 ; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; GFX7-NEXT: s_mov_b64 s[6:7], 0 -; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v6 -; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v6 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v5 ; GFX7-NEXT: .LBB21_3: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Loop Header: Depth=1 ; GFX7-NEXT: ; Child Loop BB21_4 Depth 2 -; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v4 -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v5 ; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v7 -; GFX7-NEXT: v_max_f32_e32 v4, v4, v9 -; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v6 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v8 +; GFX7-NEXT: v_max_f32_e32 v5, v5, v10 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6 ; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_max_f32_e32 v7, v7, v10 -; GFX7-NEXT: v_alignbit_b32 v5, v5, v6, 16 -; GFX7-NEXT: v_alignbit_b32 v4, v4, v7, 16 -; GFX7-NEXT: v_mov_b32_e32 v7, v5 +; GFX7-NEXT: v_max_f32_e32 v8, v8, v11 +; GFX7-NEXT: v_alignbit_b32 v6, v6, v7, 16 +; GFX7-NEXT: v_alignbit_b32 v5, v5, v8, 16 +; GFX7-NEXT: v_mov_b32_e32 v9, v6 ; GFX7-NEXT: s_mov_b64 s[12:13], exec -; GFX7-NEXT: v_mov_b32_e32 v6, v4 +; GFX7-NEXT: v_mov_b32_e32 v8, v5 ; GFX7-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX7-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -9228,23 +9066,23 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: 
s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v8, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB21_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX7-NEXT: s_mov_b64 exec, s[12:13] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v5 -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v6 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8 ; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX7-NEXT: s_cbranch_execnz .LBB21_3 ; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v4 +; GFX7-NEXT: v_mov_b32_e32 v0, v8 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; ; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: @@ -9353,10 +9191,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -9365,7 +9202,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc0 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, 
s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc0 sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -9404,10 +9241,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -9416,7 +9252,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] ; GFX90A-NEXT: buffer_wbl2 -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_invl2 ; GFX90A-NEXT: buffer_wbinvl1 @@ -9434,10 +9270,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -9446,7 +9281,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX908-NEXT: 
v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -9463,10 +9298,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -9475,7 +9309,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll index 8ac6353..671f42c 100644 --- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll +++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll @@ -37,10 +37,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; 
GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -49,7 +48,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -88,10 +87,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -99,7 +97,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5 ; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -116,10 +114,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; 
GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -128,7 +125,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -145,10 +142,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -157,7 +153,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -212,10 +208,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; 
GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v0, v0 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -223,7 +218,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: v_min_f32_e32 v0, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -262,17 +257,16 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v0, v0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_max_f32_e32 v0, v1, v1 ; GFX90A-NEXT: v_min_f32_e32 v0, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -289,10 +283,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: s_waitcnt vmcnt(0) 
expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v0, v0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -300,7 +293,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_min_f32_e32 v0, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: v_mov_b32_e32 v4, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -317,10 +310,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -328,7 +320,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_min_f32_e32 v0, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: v_mov_b32_e32 v4, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -402,7 +394,6 @@ define float 
@buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX942-NEXT: s_mov_b64 s[2:3], exec ; GFX942-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -414,22 +405,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3] ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] -; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024 -; GFX942-NEXT: ; implicit-def: $vgpr4 +; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB2_1 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: s_mov_b64 exec, s[2:3] ; GFX942-NEXT: s_mov_b64 s[2:3], 0 -; GFX942-NEXT: v_max_f32_e32 v9, v5, v5 +; GFX942-NEXT: v_max_f32_e32 v5, v5, v5 ; GFX942-NEXT: .LBB2_3: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Loop Header: Depth=1 ; GFX942-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_max_f32_e32 v4, v7, v7 -; GFX942-NEXT: v_min_f32_e32 v6, v4, v9 +; GFX942-NEXT: v_max_f32_e32 v6, v9, v9 +; GFX942-NEXT: v_min_f32_e32 v8, v6, v5 ; GFX942-NEXT: s_mov_b64 s[8:9], exec -; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7] +; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9] ; GFX942-NEXT: buffer_wbl2 sc1 ; GFX942-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX942-NEXT: ; => This Inner Loop Header: Depth=2 @@ -443,21 +433,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, 
s[4:7], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB2_4 ; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX942-NEXT: s_mov_b64 exec, s[8:9] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v7, v4 +; GFX942-NEXT: v_mov_b32_e32 v9, v6 ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB2_3 ; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX942-NEXT: s_or_b64 exec, exec, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v0, v4 +; GFX942-NEXT: v_mov_b32_e32 v0, v6 ; GFX942-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: @@ -522,7 +512,6 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX90A-NEXT: s_mov_b64 s[6:7], exec ; GFX90A-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -534,22 +523,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX90A-NEXT: ; implicit-def: $vgpr4 +; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024 ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB2_1 ; GFX90A-NEXT: ; %bb.2: ; GFX90A-NEXT: s_mov_b64 exec, s[6:7] ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 -; 
GFX90A-NEXT: v_max_f32_e32 v9, v5, v5 +; GFX90A-NEXT: v_max_f32_e32 v5, v5, v5 ; GFX90A-NEXT: .LBB2_3: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_max_f32_e32 v4, v7, v7 -; GFX90A-NEXT: v_min_f32_e32 v6, v4, v9 +; GFX90A-NEXT: v_max_f32_e32 v6, v9, v9 +; GFX90A-NEXT: v_min_f32_e32 v8, v6, v5 ; GFX90A-NEXT: s_mov_b64 s[12:13], exec -; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1] ; GFX90A-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -561,27 +549,26 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB2_4 ; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX90A-NEXT: s_mov_b64 exec, s[12:13] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v7, v4 +; GFX90A-NEXT: v_mov_b32_e32 v9, v6 ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB2_3 ; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v0, v4 +; GFX90A-NEXT: v_mov_b32_e32 v0, v6 ; GFX90A-NEXT: s_setpc_b64 s[30:31] ; ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -593,8 +580,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 -; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 +; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB2_1 ; GFX908-NEXT: ; %bb.2: @@ -605,11 +591,11 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: ; =>This Loop Header: Depth=1 ; GFX908-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_max_f32_e32 v4, v6, v6 -; GFX908-NEXT: v_min_f32_e32 v5, v4, v8 -; GFX908-NEXT: v_mov_b32_e32 v4, v5 -; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_max_f32_e32 v5, v7, v7 +; GFX908-NEXT: v_min_f32_e32 v6, v5, v8 ; GFX908-NEXT: v_mov_b32_e32 v5, v6 +; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_mov_b32_e32 v6, v7 ; GFX908-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -621,27 +607,26 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB2_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX908-NEXT: s_mov_b64 
exec, s[12:13] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v6, v4 +; GFX908-NEXT: v_mov_b32_e32 v7, v5 ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB2_3 ; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v0, v4 +; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -653,8 +638,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 -; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 +; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB2_1 ; GFX8-NEXT: ; %bb.2: @@ -665,11 +649,11 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v6 -; GFX8-NEXT: v_min_f32_e32 v5, v4, v8 -; GFX8-NEXT: v_mov_b32_e32 v4, v5 -; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mul_f32_e32 v5, 1.0, v7 +; GFX8-NEXT: v_min_f32_e32 v6, v5, v8 ; GFX8-NEXT: v_mov_b32_e32 v5, v6 +; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: .LBB2_4: ; 
Parent Loop BB2_3 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -681,21 +665,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB2_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX8-NEXT: s_mov_b64 exec, s[12:13] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v7, v5 ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB2_3 ; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v0, v4 +; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: @@ -777,10 +761,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -789,7 +772,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX942-NEXT: v_min_f32_e32 v4, v0, v2 
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -804,11 +787,10 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_max_f32 v2, v1, v1 -; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_dual_mov_b32 v3, s16 :: v_dual_max_f32 v2, v1, v1 +; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) @@ -819,7 +801,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX11-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -837,11 +819,10 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 +; 
GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: v_max_f32_e32 v2, v1, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -851,7 +832,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -869,10 +850,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -880,7 +860,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5 ; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -897,10 +877,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; 
GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -909,7 +888,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -926,10 +905,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -938,7 +916,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -955,10 +933,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_mov_b32_e32 
v1, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -967,7 +944,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1035,10 +1012,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -1047,7 +1023,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1086,10 +1062,9 @@ define float 
@buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -1097,7 +1072,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5 ; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1114,10 +1089,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -1126,7 +1100,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: 
s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1143,10 +1117,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -1155,7 +1128,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1201,29 +1174,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v2, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] +; GFX12-NEXT: v_mov_b32_e32 v8, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: 
s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1246,30 +1217,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 
+; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1301,30 +1270,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; 
GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB5_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1334,30 +1300,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v8, s20 ; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB5_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1397,11 +1360,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_mov_b32_e32 v2, s16 ; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], 
v[0:1] -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v6, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 @@ -1411,7 +1372,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[4:5] ; GFX12-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2 ; GFX12-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3] @@ -1440,11 +1401,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v2, s16 ; GFX11-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1] -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v6, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) @@ -1454,7 +1413,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX11-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5] ; GFX11-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2 ; GFX11-NEXT: 
v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -1494,9 +1453,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v2, s20 ; GFX908-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048 ; GFX908-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v6, s20 ; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -1506,7 +1464,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v9, v2 ; GFX908-NEXT: v_mov_b32_e32 v8, v1 ; GFX908-NEXT: v_mov_b32_e32 v7, v0 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3] @@ -1525,9 +1483,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v2, s20 ; GFX8-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048 ; GFX8-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v6, s20 ; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -1537,7 +1494,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v9, v2 ; 
GFX8-NEXT: v_mov_b32_e32 v8, v1 ; GFX8-NEXT: v_mov_b32_e32 v7, v0 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3] @@ -1583,10 +1540,9 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 ; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 -; GFX12-NEXT: v_add_nc_u32_e32 v15, 0x800, v4 ; GFX12-NEXT: s_mov_b32 s1, exec_lo ; GFX12-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX12-NEXT: v_readfirstlane_b32 s4, v9 ; GFX12-NEXT: v_readfirstlane_b32 s5, v10 ; GFX12-NEXT: v_readfirstlane_b32 s6, v7 @@ -1600,12 +1556,11 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], null offen offset:2048 -; GFX12-NEXT: ; implicit-def: $vgpr4 ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB7_1 ; GFX12-NEXT: ; %bb.2: ; GFX12-NEXT: s_mov_b32 exec_lo, s1 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[5:6], v[5:6] +; GFX12-NEXT: v_max_num_f64_e32 v[5:6], v[5:6], v[5:6] ; GFX12-NEXT: s_mov_b32 s1, 0 ; GFX12-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Loop Header: Depth=1 @@ -1615,7 +1570,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_mov_b32 s2, exec_lo ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_min_num_f64_e32 v[11:12], 
v[0:1], v[4:5] +; GFX12-NEXT: v_min_num_f64_e32 v[11:12], v[0:1], v[5:6] ; GFX12-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12 ; GFX12-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14 ; GFX12-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1 @@ -1632,7 +1587,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB7_4 ; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -1686,27 +1641,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 ; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 -; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x800, v4 ; GFX11-NEXT: s_mov_b32 s1, 0 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3) +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_readfirstlane_b32 s4, v9 ; GFX11-NEXT: v_readfirstlane_b32 s5, v10 ; GFX11-NEXT: v_readfirstlane_b32 s6, v7 ; GFX11-NEXT: v_readfirstlane_b32 s7, v8 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[9:10] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[7:8] ; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; 
GFX11-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], 0 offen offset:2048 -; GFX11-NEXT: ; implicit-def: $vgpr4 ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB7_1 ; GFX11-NEXT: ; %bb.2: ; GFX11-NEXT: s_mov_b32 exec_lo, s2 -; GFX11-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6] +; GFX11-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6] ; GFX11-NEXT: .p2align 6 ; GFX11-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Loop Header: Depth=1 @@ -1716,7 +1670,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_min_f64 v[11:12], v[0:1], v[4:5] +; GFX11-NEXT: v_min_f64 v[11:12], v[0:1], v[5:6] ; GFX11-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12 ; GFX11-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14 ; GFX11-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1 @@ -1732,7 +1686,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], 0 offen offset:2048 glc ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB7_4 ; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -1816,7 +1770,6 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: v_mov_b32_e32 v7, v2 ; GFX908-NEXT: v_mov_b32_e32 v10, v1 ; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_add_u32_e32 v15, 0x800, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v9 @@ -1829,12 +1782,11 @@ define double 
@buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 ; GFX908-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX908-NEXT: ; implicit-def: $vgpr4 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB7_1 ; GFX908-NEXT: ; %bb.2: ; GFX908-NEXT: s_mov_b64 exec, s[6:7] -; GFX908-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6] +; GFX908-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6] ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Loop Header: Depth=1 @@ -1842,7 +1794,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14] ; GFX908-NEXT: s_mov_b64 s[12:13], exec -; GFX908-NEXT: v_min_f64 v[11:12], v[0:1], v[4:5] +; GFX908-NEXT: v_min_f64 v[11:12], v[0:1], v[5:6] ; GFX908-NEXT: v_mov_b32_e32 v0, v11 ; GFX908-NEXT: v_mov_b32_e32 v1, v12 ; GFX908-NEXT: v_mov_b32_e32 v2, v13 @@ -1858,7 +1810,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB7_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -1882,7 +1834,6 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: v_mov_b32_e32 v7, v2 ; GFX8-NEXT: v_mov_b32_e32 v10, v1 ; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_add_u32_e32 v15, vcc, 0x800, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v9 @@ -1895,12 
+1846,11 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 ; GFX8-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX8-NEXT: ; implicit-def: $vgpr4 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB7_1 ; GFX8-NEXT: ; %bb.2: ; GFX8-NEXT: s_mov_b64 exec, s[6:7] -; GFX8-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6] +; GFX8-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6] ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Loop Header: Depth=1 @@ -1908,7 +1858,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14] ; GFX8-NEXT: s_mov_b64 s[12:13], exec -; GFX8-NEXT: v_min_f64 v[11:12], v[0:1], v[4:5] +; GFX8-NEXT: v_min_f64 v[11:12], v[0:1], v[5:6] ; GFX8-NEXT: v_mov_b32_e32 v0, v11 ; GFX8-NEXT: v_mov_b32_e32 v1, v12 ; GFX8-NEXT: v_mov_b32_e32 v2, v13 @@ -1924,7 +1874,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB7_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -2008,29 +1958,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: 
v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v2, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] +; GFX12-NEXT: v_mov_b32_e32 v8, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2053,30 +2001,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; 
GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; 
GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2088,31 +2034,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v2, v0 -; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: v_mov_b32_e32 v3, v1 -; GFX10-NEXT: s_add_i32 s4, s20, 0x800 -; GFX10-NEXT: v_mov_b32_e32 v6, s4 -; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX10-NEXT: v_mov_b32_e32 v2, s20 +; GFX10-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX10-NEXT: v_mov_b32_e32 v8, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v10, v1 -; GFX10-NEXT: v_mov_b32_e32 v9, v0 +; GFX10-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX10-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX10-NEXT: v_mov_b32_e32 v0, v7 -; GFX10-NEXT: v_mov_b32_e32 v1, v8 -; GFX10-NEXT: v_mov_b32_e32 v2, v9 -; GFX10-NEXT: v_mov_b32_e32 v3, v10 -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX10-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX10-NEXT: v_mov_b32_e32 v0, v2 +; GFX10-NEXT: v_mov_b32_e32 v1, v3 +; GFX10-NEXT: v_mov_b32_e32 v2, v4 +; GFX10-NEXT: v_mov_b32_e32 v3, v5 +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX10-NEXT: 
v_mov_b32_e32 v5, v1 +; GFX10-NEXT: v_mov_b32_e32 v4, v0 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB8_1 @@ -2123,27 +2066,24 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_mov_b32_e32 v2, v0 -; GFX90A-NEXT: v_mov_b32_e32 v0, s20 -; GFX90A-NEXT: v_mov_b32_e32 v3, v1 -; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x800 +; GFX90A-NEXT: v_mov_b32_e32 v2, s20 +; GFX90A-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX90A-NEXT: v_mov_b32_e32 v6, s6 +; GFX90A-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX90A-NEXT: v_mov_b32_e32 v8, s20 ; GFX90A-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11] -; GFX90A-NEXT: v_min_f64 v[8:9], v[0:1], v[4:5] -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1] -; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX90A-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX90A-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1] +; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 -; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] 
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1] ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB8_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2153,30 +2093,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; 
GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB8_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2186,30 +2123,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v8, s20 ; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; 
GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB8_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2219,30 +2153,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v2, v0 -; GFX7-NEXT: v_mov_b32_e32 v0, s20 -; GFX7-NEXT: v_mov_b32_e32 v3, v1 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX7-NEXT: s_add_i32 s6, s20, 0x800 -; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX7-NEXT: v_mov_b32_e32 v2, s20 +; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v6, s6 +; GFX7-NEXT: v_mov_b32_e32 v8, s20 ; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v10, v1 -; GFX7-NEXT: v_mov_b32_e32 v9, v0 -; GFX7-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX7-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v8 -; GFX7-NEXT: v_mov_b32_e32 v2, v9 -; GFX7-NEXT: v_mov_b32_e32 v3, v10 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX7-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX7-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX7-NEXT: v_mov_b32_e32 v0, v2 +; GFX7-NEXT: v_mov_b32_e32 v1, v3 +; GFX7-NEXT: v_mov_b32_e32 v2, v4 +; GFX7-NEXT: v_mov_b32_e32 v3, v5 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX7-NEXT: 
s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v5, v1 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX7-NEXT: v_mov_b32_e32 v4, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB8_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2252,31 +2183,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v2, v0 -; GFX6-NEXT: v_mov_b32_e32 v0, s20 -; GFX6-NEXT: v_mov_b32_e32 v3, v1 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX6-NEXT: v_mov_b32_e32 v2, s20 +; GFX6-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800 -; GFX6-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX6-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX6-NEXT: s_mov_b64 s[4:5], 0 -; GFX6-NEXT: v_mov_b32_e32 v6, s6 +; GFX6-NEXT: v_mov_b32_e32 v8, s6 ; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v10, v1 -; GFX6-NEXT: v_mov_b32_e32 v9, v0 -; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX6-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX6-NEXT: v_mov_b32_e32 v0, v7 -; GFX6-NEXT: v_mov_b32_e32 v1, v8 -; GFX6-NEXT: v_mov_b32_e32 v2, v9 -; GFX6-NEXT: v_mov_b32_e32 v3, v10 -; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; GFX6-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX6-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX6-NEXT: v_mov_b32_e32 v0, v2 +; GFX6-NEXT: v_mov_b32_e32 v1, v3 +; GFX6-NEXT: v_mov_b32_e32 v2, v4 +; GFX6-NEXT: v_mov_b32_e32 v3, v5 +; 
GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX6-NEXT: v_mov_b32_e32 v5, v1 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_mov_b32_e32 v4, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB8_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2296,29 +2224,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v2, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] +; GFX12-NEXT: v_mov_b32_e32 v8, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: 
v_min_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2341,30 +2267,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], 
v[9:10] -; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2396,30 +2320,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) 
-; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB9_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2429,30 +2350,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v8, s20 ; GFX8-NEXT: .LBB9_1: ; 
%atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB9_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6146,13 +6064,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v3, s4 +; GFX12-NEXT: v_mov_b32_e32 v3, s16 +; GFX12-NEXT: s_mov_b32 s4, 0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX12-NEXT: v_pk_max_num_f16 v2, v1, v1 ; GFX12-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024 -; GFX12-NEXT: s_mov_b32 s4, 0 ; GFX12-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 @@ -6163,7 +6079,7 @@ define <2 x half> 
@buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX12-NEXT: v_pk_min_num_f16 v4, v0, v2 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5 @@ -6182,10 +6098,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_pk_max_f16 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -6195,7 +6110,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX942-NEXT: v_pk_min_f16 v4, v0, v2 ; GFX942-NEXT: s_nop 0 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -6210,12 +6125,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_mov_b32_e32 v3, s4 +; GFX11-NEXT: v_mov_b32_e32 v3, 
s16 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_pk_max_f16 v2, v1, v1 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 -; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) @@ -6226,7 +6140,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX11-NEXT: v_pk_min_f16 v4, v0, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -6244,11 +6158,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: v_pk_max_f16 v2, v1, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -6258,7 +6171,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: v_pk_min_f16 v4, v0, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -6276,10 +6189,9 @@ define <2 x half> 
@buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_pk_max_f16 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -6287,7 +6199,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX90A-NEXT: v_pk_max_f16 v0, v5, v5 ; GFX90A-NEXT: v_pk_min_f16 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -6304,10 +6216,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_pk_max_f16 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -6316,7 +6227,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_pk_min_f16 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt 
vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -6333,11 +6244,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_max_f16_sdwa v2, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 ; GFX8-NEXT: v_max_f16_e32 v3, v1, v1 -; GFX8-NEXT: v_mov_b32_e32 v4, s6 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -6349,7 +6259,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_or_b32_e32 v5, v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: v_mov_b32_e32 v1, v6 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -6367,7 +6277,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -6375,7 +6284,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 
v1, v1 @@ -6392,7 +6301,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_or_b32_e32 v5, v7, v0 ; GFX7-NEXT: v_mov_b32_e32 v8, v6 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7 @@ -6467,10 +6376,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_mov_b32_e32 v1, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400 ; GFX12-NEXT: v_pk_max_num_f16 v2, v0, v0 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v3, s4 +; GFX12-NEXT: v_mov_b32_e32 v3, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 ; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 ; GFX12-NEXT: .LBB17_1: ; %atomicrmw.start @@ -6481,7 +6388,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX12-NEXT: v_pk_min_num_f16 v0, v0, v2 ; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 -; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1 @@ -6500,10 +6407,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_pk_max_f16 v2, v0, v0 -; 
GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -6512,7 +6418,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX942-NEXT: v_pk_min_f16 v0, v0, v2 ; GFX942-NEXT: s_nop 0 ; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -6528,9 +6434,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v1, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 ; GFX11-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX11-NEXT: v_mov_b32_e32 v3, s4 +; GFX11-NEXT: v_mov_b32_e32 v3, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start @@ -6541,7 +6446,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_pk_min_f16 v0, v0, v2 ; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -6559,9 +6464,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_pk_max_f16 v2, v0, v0 -; 
GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start @@ -6572,7 +6476,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX10-NEXT: v_pk_min_f16 v0, v0, v2 ; GFX10-NEXT: v_mov_b32_e32 v5, v1 ; GFX10-NEXT: v_mov_b32_e32 v4, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -6590,17 +6494,16 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_pk_max_f16 v0, v1, v1 ; GFX90A-NEXT: v_pk_min_f16 v0, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -6617,10 +6520,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 
s[4:5], 0 ; GFX908-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -6628,7 +6530,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX908-NEXT: v_pk_min_f16 v0, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: v_mov_b32_e32 v4, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -6645,11 +6547,10 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_max_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 ; GFX8-NEXT: v_max_f16_e32 v3, v0, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s6 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -6660,7 +6561,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX8-NEXT: v_or_b32_e32 v0, v5, v0 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -6679,7 +6580,6 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: buffer_load_dword 
v2, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -6687,7 +6587,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2 ; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 @@ -6704,7 +6604,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: v_or_b32_e32 v4, v6, v3 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6 @@ -6778,7 +6678,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX12-NEXT: s_mov_b32 s1, exec_lo ; GFX12-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: v_readfirstlane_b32 s4, v0 @@ -6793,8 +6692,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024 -; GFX12-NEXT: ; implicit-def: $vgpr4 +; GFX12-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024 ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB18_1 ; 
GFX12-NEXT: ; %bb.2: @@ -6805,13 +6703,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX12-NEXT: ; =>This Loop Header: Depth=1 ; GFX12-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_pk_max_num_f16 v4, v6, v6 +; GFX12-NEXT: v_pk_max_num_f16 v5, v7, v7 ; GFX12-NEXT: s_mov_b32 s2, exec_lo ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_pk_min_num_f16 v5, v4, v8 -; GFX12-NEXT: v_mov_b32_e32 v4, v5 +; GFX12-NEXT: v_pk_min_num_f16 v6, v5, v8 ; GFX12-NEXT: v_mov_b32_e32 v5, v6 +; GFX12-NEXT: v_mov_b32_e32 v6, v7 ; GFX12-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX12-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX12-NEXT: v_readfirstlane_b32 s4, v0 @@ -6826,14 +6724,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB18_4 ; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX12-NEXT: s_mov_b32 exec_lo, s2 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX12-NEXT: v_mov_b32_e32 v6, v4 +; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX12-NEXT: v_mov_b32_e32 v7, v5 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX12-NEXT: s_wait_alu 0xfffe @@ -6841,14 +6739,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_cbranch_execnz .LBB18_3 ; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX12-NEXT: v_mov_b32_e32 v0, v4 +; GFX12-NEXT: 
v_mov_b32_e32 v0, v5 ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; ; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX942-NEXT: s_mov_b64 s[2:3], exec ; GFX942-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -6860,23 +6757,22 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3] ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] -; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024 -; GFX942-NEXT: ; implicit-def: $vgpr4 +; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB18_1 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: s_mov_b64 exec, s[2:3] ; GFX942-NEXT: s_mov_b64 s[2:3], 0 -; GFX942-NEXT: v_pk_max_f16 v9, v5, v5 +; GFX942-NEXT: v_pk_max_f16 v5, v5, v5 ; GFX942-NEXT: .LBB18_3: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Loop Header: Depth=1 ; GFX942-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_pk_max_f16 v4, v7, v7 +; GFX942-NEXT: v_pk_max_f16 v6, v9, v9 ; GFX942-NEXT: s_mov_b64 s[8:9], exec -; GFX942-NEXT: v_pk_min_f16 v6, v4, v9 +; GFX942-NEXT: v_pk_min_f16 v8, v6, v5 ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7] +; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9] ; GFX942-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX942-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -6889,27 +6785,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] ; 
GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB18_4 ; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX942-NEXT: s_mov_b64 exec, s[8:9] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v7, v4 +; GFX942-NEXT: v_mov_b32_e32 v9, v6 ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB18_3 ; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX942-NEXT: s_or_b64 exec, exec, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v0, v4 +; GFX942-NEXT: v_mov_b32_e32 v0, v6 ; GFX942-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-NEXT: s_mov_b32 s1, 0 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 @@ -6923,8 +6818,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-NEXT: ; implicit-def: $vgpr4 +; GFX11-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB18_1 ; GFX11-NEXT: ; %bb.2: @@ -6935,13 +6829,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX11-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX11-NEXT: s_waitcnt 
vmcnt(0) -; GFX11-NEXT: v_pk_max_f16 v4, v6, v6 +; GFX11-NEXT: v_pk_max_f16 v5, v7, v7 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_pk_min_f16 v5, v4, v8 -; GFX11-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-NEXT: v_pk_min_f16 v6, v5, v8 ; GFX11-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX11-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-NEXT: v_readfirstlane_b32 s4, v0 @@ -6955,14 +6849,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB18_4 ; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX11-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv ; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -6971,13 +6865,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_cbranch_execnz .LBB18_3 ; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 
; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 @@ -6989,8 +6882,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3] ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 -; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX10-NEXT: ; implicit-def: $vgpr4 +; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB18_1 @@ -7001,12 +6893,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_pk_max_f16 v4, v6, v6 +; GFX10-NEXT: v_pk_max_f16 v5, v7, v7 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_pk_min_f16 v5, v4, v8 -; GFX10-NEXT: v_mov_b32_e32 v4, v5 +; GFX10-NEXT: v_pk_min_f16 v6, v5, v8 ; GFX10-NEXT: v_mov_b32_e32 v5, v6 +; GFX10-NEXT: v_mov_b32_e32 v6, v7 ; GFX10-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX10-NEXT: v_readfirstlane_b32 s8, v0 @@ -7018,15 +6910,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB18_4 ; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX10-NEXT: s_mov_b32 exec_lo, s6 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; 
GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX10-NEXT: v_mov_b32_e32 v7, v5 ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 @@ -7035,13 +6927,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX10-NEXT: s_cbranch_execnz .LBB18_3 ; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: v_mov_b32_e32 v0, v4 +; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX90A-NEXT: s_mov_b64 s[6:7], exec ; GFX90A-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -7053,22 +6944,21 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX90A-NEXT: ; implicit-def: $vgpr4 +; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024 ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB18_1 ; GFX90A-NEXT: ; %bb.2: ; GFX90A-NEXT: s_mov_b64 exec, s[6:7] ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 -; GFX90A-NEXT: v_pk_max_f16 v9, v5, v5 +; GFX90A-NEXT: v_pk_max_f16 v5, v5, v5 ; GFX90A-NEXT: .LBB18_3: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_pk_max_f16 v4, v7, v7 -; GFX90A-NEXT: v_pk_min_f16 v6, v4, v9 +; GFX90A-NEXT: v_pk_max_f16 v6, v9, v9 +; GFX90A-NEXT: v_pk_min_f16 v8, v6, v5 ; GFX90A-NEXT: s_mov_b64 s[12:13], exec -; 
GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1] ; GFX90A-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -7080,27 +6970,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB18_4 ; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX90A-NEXT: s_mov_b64 exec, s[12:13] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v7, v4 +; GFX90A-NEXT: v_mov_b32_e32 v9, v6 ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB18_3 ; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v0, v4 +; GFX90A-NEXT: v_mov_b32_e32 v0, v6 ; GFX90A-NEXT: s_setpc_b64 s[30:31] ; ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -7112,8 +7001,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 -; 
GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 +; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB18_1 ; GFX908-NEXT: ; %bb.2: @@ -7124,11 +7012,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX908-NEXT: ; =>This Loop Header: Depth=1 ; GFX908-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_pk_max_f16 v4, v6, v6 -; GFX908-NEXT: v_pk_min_f16 v5, v4, v8 -; GFX908-NEXT: v_mov_b32_e32 v4, v5 -; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_pk_max_f16 v5, v7, v7 +; GFX908-NEXT: v_pk_min_f16 v6, v5, v8 ; GFX908-NEXT: v_mov_b32_e32 v5, v6 +; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_mov_b32_e32 v6, v7 ; GFX908-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -7140,27 +7028,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB18_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX908-NEXT: s_mov_b64 exec, s[12:13] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v6, v4 +; GFX908-NEXT: v_mov_b32_e32 v7, v5 ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB18_3 ; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX908-NEXT: 
s_or_b64 exec, exec, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v0, v4 +; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -7172,8 +7059,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 -; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 +; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB18_1 ; GFX8-NEXT: ; %bb.2: @@ -7185,14 +7071,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_max_f16_sdwa v4, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_max_f16_e32 v5, v6, v6 -; GFX8-NEXT: v_min_f16_sdwa v4, v4, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX8-NEXT: v_min_f16_e32 v5, v5, v9 -; GFX8-NEXT: v_or_b32_e32 v5, v5, v4 -; GFX8-NEXT: v_mov_b32_e32 v4, v5 -; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_max_f16_sdwa v5, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-NEXT: v_max_f16_e32 v6, v7, v7 +; GFX8-NEXT: v_min_f16_sdwa v5, v5, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-NEXT: v_min_f16_e32 v6, v6, v9 +; GFX8-NEXT: v_or_b32_e32 v6, v6, v5 ; GFX8-NEXT: v_mov_b32_e32 v5, v6 +; GFX8-NEXT: 
s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -7204,27 +7090,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB18_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX8-NEXT: s_mov_b64 exec, s[12:13] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v7, v5 ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB18_3 ; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v0, v4 +; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -7235,39 +7120,38 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3] ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] -; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX7-NEXT: ; implicit-def: $vgpr4 +; 
GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB18_1 ; GFX7-NEXT: ; %bb.2: ; GFX7-NEXT: s_mov_b64 exec, s[6:7] ; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6 -; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v5 +; GFX7-NEXT: v_cvt_f16_f32_e32 v9, v5 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8 ; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5 ; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v6 -; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v9 ; GFX7-NEXT: s_mov_b64 s[6:7], 0 ; GFX7-NEXT: .LBB18_3: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Loop Header: Depth=1 ; GFX7-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5 -; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 +; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v7 ; GFX7-NEXT: s_mov_b64 s[12:13], exec -; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5 -; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v4 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v5 +; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v6 ; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX7-NEXT: v_min_f32_e32 v6, v6, v10 -; GFX7-NEXT: v_min_f32_e32 v7, v7, v11 -; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v6 +; GFX7-NEXT: v_or_b32_e32 v6, v6, v5 +; GFX7-NEXT: v_min_f32_e32 v7, v7, v10 +; GFX7-NEXT: v_min_f32_e32 v8, v8, v11 ; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7 -; GFX7-NEXT: v_or_b32_e32 v6, v4, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; GFX7-NEXT: v_or_b32_e32 v5, v7, v4 -; GFX7-NEXT: v_mov_b32_e32 v8, v6 -; GFX7-NEXT: v_mov_b32_e32 v7, v5 +; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX7-NEXT: v_or_b32_e32 v5, v8, v5 +; GFX7-NEXT: v_mov_b32_e32 v9, v6 +; GFX7-NEXT: v_mov_b32_e32 v8, v5 ; GFX7-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX7-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -7279,23 
+7163,23 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v9, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB18_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX7-NEXT: s_mov_b64 exec, s[12:13] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8 ; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v6 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6 ; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7] ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX7-NEXT: s_cbranch_execnz .LBB18_3 ; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v4 +; GFX7-NEXT: v_mov_b32_e32 v0, v7 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -7396,13 +7280,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe +; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1 +; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1 ; GFX12-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024 ; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0 ; 
GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 @@ -7431,7 +7313,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6 @@ -7452,11 +7334,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400 ; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0 -; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1 +; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1 ; GFX12-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024 ; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start @@ -7487,7 +7367,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX12-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 
v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6 @@ -7506,13 +7386,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -7534,7 +7413,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5] ; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7] -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -7549,12 +7428,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1 +; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 
:: v_dual_lshlrev_b32 v3, 16, v1 ; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start @@ -7583,7 +7461,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -7601,10 +7479,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1 ; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 @@ -7635,7 +7512,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], 
v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -7654,9 +7531,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 @@ -7682,7 +7558,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: v_mov_b32_e32 v1, v6 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -7700,13 +7576,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -7727,7 +7602,7 @@ define <2 x bfloat> 
@buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -7744,13 +7619,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -7772,7 +7646,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: v_mov_b32_e32 v1, v6 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -7789,11 +7663,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: 
v_lshlrev_b32_e32 v2, 16, v1 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -7818,7 +7691,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: v_mov_b32_e32 v1, v6 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -7834,7 +7707,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -7843,7 +7715,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 @@ -7858,7 +7730,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: v_alignbit_b32 v0, v0, v6, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v1 ; GFX7-NEXT: v_mov_b32_e32 v5, v0 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; 
GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -7928,11 +7800,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe -; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 +; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 ; GFX12-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 @@ -7958,7 +7828,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1 @@ -7980,11 +7850,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe -; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen 
offset:1024 +; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 ; GFX12-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 @@ -8010,7 +7878,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX12-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1 @@ -8029,13 +7897,12 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -8056,7 +7923,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] ; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX942-NEXT: 
buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -8072,11 +7939,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start @@ -8102,7 +7967,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -8121,11 +7986,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 
0x400 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start @@ -8151,7 +8014,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -8170,12 +8033,11 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 -; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -8197,7 +8059,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX10-NEXT: v_perm_b32 v0, v5, 
v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v6, v1 ; GFX10-NEXT: v_mov_b32_e32 v5, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -8215,13 +8077,12 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -8241,7 +8102,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -8258,13 +8119,12 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; 
GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -8285,7 +8145,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v6, v1 ; GFX908-NEXT: v_mov_b32_e32 v5, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -8302,11 +8162,10 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -8330,7 +8189,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -8347,7 +8206,6 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) 
lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v0 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -8356,7 +8214,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v2 ; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 @@ -8371,7 +8229,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: v_alignbit_b32 v3, v3, v6, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4 @@ -8440,7 +8298,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0 ; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo ; GFX12-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8455,8 +8312,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024 -; GFX12-TRUE16-NEXT: ; implicit-def: $vgpr4 +; GFX12-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null 
offen offset:1024 ; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX12-TRUE16-NEXT: ; %bb.2: @@ -8468,30 +8324,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v5, v5, v8 :: v_dual_min_num_f32 v4, v4, v9 -; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v6, v6, v8 :: v_dual_min_num_f32 v5, v5, v9 +; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff +; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3) -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-TRUE16-NEXT: 
v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v5 ; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX12-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8506,14 +8362,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -8521,7 +8377,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_3 ; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v4 +; 
GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -8532,7 +8388,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo ; GFX12-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8547,8 +8402,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024 -; GFX12-FAKE16-NEXT: ; implicit-def: $vgpr4 +; GFX12-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024 ; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX12-FAKE16-NEXT: ; %bb.2: @@ -8560,30 +8414,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v5, v5, v9 :: v_dual_min_num_f32 v4, v4, v8 -; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v6, v6, v9 :: v_dual_min_num_f32 v5, v5, v8 +; GFX12-FAKE16-NEXT: v_bfe_u32 
v11, v6, 16, 1 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff +; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo -; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo +; GFX12-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5 ; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX12-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8598,14 +8452,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen 
th:TH_ATOMIC_RETURN +; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -8613,14 +8467,13 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_3 ; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX942-NEXT: s_mov_b64 s[2:3], exec ; GFX942-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -8632,40 +8485,39 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3] ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] -; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024 -; GFX942-NEXT: ; implicit-def: $vgpr4 +; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: 
s_cbranch_execnz .LBB21_1 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: s_mov_b64 exec, s[2:3] ; GFX942-NEXT: s_mov_b64 s[2:3], 0 -; GFX942-NEXT: v_lshlrev_b32_e32 v9, 16, v5 +; GFX942-NEXT: v_lshlrev_b32_e32 v10, 16, v5 ; GFX942-NEXT: s_movk_i32 s10, 0x7fff -; GFX942-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX942-NEXT: s_mov_b32 s11, 0x7060302 ; GFX942-NEXT: .LBB21_3: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Loop Header: Depth=1 ; GFX942-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v7 -; GFX942-NEXT: v_min_f32_e32 v4, v4, v9 -; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX942-NEXT: v_add3_u32 v5, v5, v4, s10 -; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX942-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX942-NEXT: v_min_f32_e32 v6, v6, v10 +; GFX942-NEXT: v_bfe_u32 v7, v6, 16, 1 +; GFX942-NEXT: v_add3_u32 v7, v7, v6, s10 +; GFX942-NEXT: v_or_b32_e32 v8, 0x400000, v6 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 ; GFX942-NEXT: s_mov_b64 s[8:9], exec ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v7 -; GFX942-NEXT: v_min_f32_e32 v5, v5, v10 -; GFX942-NEXT: v_bfe_u32 v6, v5, 16, 1 -; GFX942-NEXT: v_add3_u32 v6, v6, v5, s10 -; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX942-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc +; GFX942-NEXT: v_and_b32_e32 v7, 0xffff0000, v9 +; GFX942-NEXT: v_min_f32_e32 v7, v7, v5 +; GFX942-NEXT: v_bfe_u32 v8, v7, 16, 1 +; GFX942-NEXT: v_add3_u32 v8, v8, v7, s10 +; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 ; GFX942-NEXT: s_nop 1 -; GFX942-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc -; GFX942-NEXT: v_perm_b32 v6, v5, v4, s11 -; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7] +; GFX942-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc +; 
GFX942-NEXT: v_perm_b32 v8, v7, v6, s11 +; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9] ; GFX942-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX942-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -8678,27 +8530,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB21_4 ; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX942-NEXT: s_mov_b64 exec, s[8:9] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v7, v4 +; GFX942-NEXT: v_mov_b32_e32 v9, v6 ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB21_3 ; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX942-NEXT: s_or_b64 exec, exec, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v0, v4 +; GFX942-NEXT: v_mov_b32_e32 v0, v6 ; GFX942-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0 ; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -8712,8 +8563,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 
s0, s0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4 +; GFX11-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX11-TRUE16-NEXT: ; %bb.2: @@ -8726,28 +8576,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v8 :: v_dual_min_f32 v4, v4, v9 -; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11-TRUE16-NEXT: v_dual_min_f32 v6, v6, v8 :: v_dual_min_f32 v5, v5, v9 +; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo +; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 
0x400000, v6 +; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h ; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8761,14 +8611,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv ; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -8778,13 +8628,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end ; 
GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0 ; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -8798,8 +8647,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4 +; GFX11-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX11-FAKE16-NEXT: ; %bb.2: @@ -8812,28 +8660,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_min_f32 v5, v5, v9 :: v_dual_min_f32 v4, v4, v8 -; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11-FAKE16-NEXT: 
v_dual_min_f32 v6, v6, v9 :: v_dual_min_f32 v5, v5, v8 +; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo +; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8847,14 +8695,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-FAKE16-NEXT: 
s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv ; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -8864,13 +8712,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2 ; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -8882,8 +8729,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3] ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 -; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX10-NEXT: ; implicit-def: $vgpr4 +; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: 
s_cbranch_execnz .LBB21_1 @@ -8895,25 +8741,25 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_min_f32_e32 v4, v4, v8 -; GFX10-NEXT: v_min_f32_e32 v5, v5, v9 -; GFX10-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX10-NEXT: v_bfe_u32 v11, v5, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX10-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX10-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX10-NEXT: v_min_f32_e32 v5, v5, v8 +; GFX10-NEXT: v_min_f32_e32 v6, v6, v9 +; GFX10-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX10-NEXT: v_bfe_u32 v11, v6, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX10-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo -; GFX10-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 -; GFX10-NEXT: v_mov_b32_e32 v4, v5 +; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX10-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX10-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX10-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo +; GFX10-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v5, v6 +; GFX10-NEXT: v_mov_b32_e32 v6, v7 ; GFX10-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX10-NEXT: v_readfirstlane_b32 s8, v0 @@ -8925,15 +8771,15 @@ define <2 x bfloat> 
@buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB21_4 ; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX10-NEXT: s_mov_b32 exec_lo, s6 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX10-NEXT: v_mov_b32_e32 v7, v5 ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 @@ -8942,13 +8788,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX10-NEXT: s_cbranch_execnz .LBB21_3 ; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: v_mov_b32_e32 v0, v4 +; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX90A-NEXT: s_mov_b64 s[6:7], exec ; GFX90A-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -8960,38 +8805,37 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX90A-NEXT: ; implicit-def: $vgpr4 +; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024 ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; 
GFX90A-NEXT: s_cbranch_execnz .LBB21_1 ; GFX90A-NEXT: ; %bb.2: ; GFX90A-NEXT: s_mov_b64 exec, s[6:7] ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 -; GFX90A-NEXT: v_lshlrev_b32_e32 v9, 16, v5 +; GFX90A-NEXT: v_lshlrev_b32_e32 v10, 16, v5 ; GFX90A-NEXT: s_movk_i32 s14, 0x7fff -; GFX90A-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX90A-NEXT: s_mov_b32 s15, 0x7060302 ; GFX90A-NEXT: .LBB21_3: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v7 -; GFX90A-NEXT: v_min_f32_e32 v4, v4, v9 -; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s14 -; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v7 -; GFX90A-NEXT: v_min_f32_e32 v5, v5, v10 -; GFX90A-NEXT: v_bfe_u32 v6, v5, 16, 1 -; GFX90A-NEXT: v_add3_u32 v6, v6, v5, s14 -; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX90A-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc -; GFX90A-NEXT: v_perm_b32 v6, v5, v4, s15 +; GFX90A-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX90A-NEXT: v_min_f32_e32 v6, v6, v10 +; GFX90A-NEXT: v_bfe_u32 v7, v6, 16, 1 +; GFX90A-NEXT: v_add3_u32 v7, v7, v6, s14 +; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v6 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX90A-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc +; GFX90A-NEXT: v_and_b32_e32 v7, 0xffff0000, v9 +; GFX90A-NEXT: v_min_f32_e32 v7, v7, v5 +; GFX90A-NEXT: v_bfe_u32 v8, v7, 16, 1 +; GFX90A-NEXT: v_add3_u32 v8, v8, v7, s14 +; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX90A-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc +; GFX90A-NEXT: v_perm_b32 v8, v7, v6, s15 ; GFX90A-NEXT: s_mov_b64 s[12:13], exec -; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] 
op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1] ; GFX90A-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -9003,27 +8847,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB21_4 ; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX90A-NEXT: s_mov_b64 exec, s[12:13] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v7, v4 +; GFX90A-NEXT: v_mov_b32_e32 v9, v6 ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB21_3 ; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v0, v4 +; GFX90A-NEXT: v_mov_b32_e32 v0, v6 ; GFX90A-NEXT: s_setpc_b64 s[30:31] ; ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -9035,8 +8878,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 -; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen 
offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 +; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB21_1 ; GFX908-NEXT: ; %bb.2: @@ -9050,24 +8892,24 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX908-NEXT: ; =>This Loop Header: Depth=1 ; GFX908-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX908-NEXT: v_min_f32_e32 v4, v4, v8 -; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX908-NEXT: v_add3_u32 v5, v5, v4, s14 -; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v4 -; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc -; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX908-NEXT: v_min_f32_e32 v5, v5, v9 -; GFX908-NEXT: v_bfe_u32 v10, v5, 16, 1 -; GFX908-NEXT: v_add3_u32 v10, v10, v5, s14 -; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v5 +; GFX908-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX908-NEXT: v_min_f32_e32 v5, v5, v8 +; GFX908-NEXT: v_bfe_u32 v6, v5, 16, 1 +; GFX908-NEXT: v_add3_u32 v6, v6, v5, s14 +; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v5 ; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX908-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc -; GFX908-NEXT: v_perm_b32 v5, v5, v4, s15 -; GFX908-NEXT: v_mov_b32_e32 v4, v5 -; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc +; GFX908-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX908-NEXT: v_min_f32_e32 v6, v6, v9 +; GFX908-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX908-NEXT: v_add3_u32 v10, v10, v6, s14 +; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX908-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX908-NEXT: v_perm_b32 v6, v6, v5, s15 ; GFX908-NEXT: v_mov_b32_e32 v5, v6 +; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_mov_b32_e32 v6, v7 ; GFX908-NEXT: .LBB21_4: ; Parent Loop BB21_3 
Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -9079,27 +8921,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB21_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX908-NEXT: s_mov_b64 exec, s[12:13] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v6, v4 +; GFX908-NEXT: v_mov_b32_e32 v7, v5 ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB21_3 ; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v0, v4 +; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -9111,8 +8952,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 -; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 +; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX8-NEXT: s_xor_b64 
exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB21_1 ; GFX8-NEXT: ; %bb.2: @@ -9124,27 +8964,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX8-NEXT: v_min_f32_e32 v4, v4, v8 -; GFX8-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v4 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5 -; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc -; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX8-NEXT: v_min_f32_e32 v5, v5, v9 -; GFX8-NEXT: v_bfe_u32 v10, v5, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v5 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10 -; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX8-NEXT: v_min_f32_e32 v5, v5, v8 +; GFX8-NEXT: v_bfe_u32 v6, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v5 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; GFX8-NEXT: v_alignbit_b32 v5, v5, v4, 16 -; GFX8-NEXT: v_mov_b32_e32 v4, v5 -; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX8-NEXT: v_min_f32_e32 v6, v6, v9 +; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX8-NEXT: v_alignbit_b32 v6, v6, v5, 16 ; GFX8-NEXT: v_mov_b32_e32 v5, v6 +; GFX8-NEXT: s_mov_b64 s[12:13], exec +; 
GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -9156,27 +8996,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB21_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX8-NEXT: s_mov_b64 exec, s[12:13] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v7, v5 ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB21_3 ; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v0, v4 +; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v8, vcc, 0x400, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -9187,8 +9026,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3] ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] -; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX7-NEXT: ; implicit-def: $vgpr4 +; GFX7-NEXT: buffer_load_dword 
v8, v4, s[8:11], 0 offen offset:1024 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB21_1 ; GFX7-NEXT: ; %bb.2: @@ -9196,27 +9034,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 ; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; GFX7-NEXT: s_mov_b64 s[6:7], 0 -; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v6 -; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v6 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v5 ; GFX7-NEXT: .LBB21_3: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Loop Header: Depth=1 ; GFX7-NEXT: ; Child Loop BB21_4 Depth 2 -; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v4 -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v5 ; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v7 -; GFX7-NEXT: v_min_f32_e32 v4, v4, v9 -; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v6 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v8 +; GFX7-NEXT: v_min_f32_e32 v5, v5, v10 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6 ; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_min_f32_e32 v7, v7, v10 -; GFX7-NEXT: v_alignbit_b32 v5, v5, v6, 16 -; GFX7-NEXT: v_alignbit_b32 v4, v4, v7, 16 -; GFX7-NEXT: v_mov_b32_e32 v7, v5 +; GFX7-NEXT: v_min_f32_e32 v8, v8, v11 +; GFX7-NEXT: v_alignbit_b32 v6, v6, v7, 16 +; GFX7-NEXT: v_alignbit_b32 v5, v5, v8, 16 +; GFX7-NEXT: v_mov_b32_e32 v9, v6 ; GFX7-NEXT: s_mov_b64 s[12:13], exec -; GFX7-NEXT: v_mov_b32_e32 v6, v4 +; GFX7-NEXT: v_mov_b32_e32 v8, v5 ; GFX7-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX7-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -9228,23 +9066,23 @@ 
define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v8, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB21_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX7-NEXT: s_mov_b64 exec, s[12:13] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v5 -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v6 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8 ; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX7-NEXT: s_cbranch_execnz .LBB21_3 ; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v4 +; GFX7-NEXT: v_mov_b32_e32 v0, v8 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; ; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: @@ -9353,10 +9191,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -9365,7 +9202,7 @@ define float 
@buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc0 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc0 sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -9404,10 +9241,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -9416,7 +9252,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] ; GFX90A-NEXT: buffer_wbl2 -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_invl2 ; GFX90A-NEXT: buffer_wbinvl1 @@ -9434,10 +9270,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX908-NEXT: 
; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -9446,7 +9281,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -9463,10 +9298,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -9475,7 +9309,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll index 3c991cf..afd0f01 100644 --- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll +++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll @@ -782,69 +782,90 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp ; SDAG-GFX942-LABEL: memcpy_known_medium: ; SDAG-GFX942: ; 
%bb.0: ; SDAG-GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; SDAG-GFX942-NEXT: s_load_dword s13, s[4:5], 0x34 +; SDAG-GFX942-NEXT: s_load_dword s17, s[4:5], 0x34 ; SDAG-GFX942-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x44 -; SDAG-GFX942-NEXT: s_load_dword s14, s[4:5], 0x54 -; SDAG-GFX942-NEXT: s_mov_b32 s12, 0 -; SDAG-GFX942-NEXT: s_mov_b32 s5, s12 -; SDAG-GFX942-NEXT: v_mov_b32_e32 v0, 0 +; SDAG-GFX942-NEXT: s_load_dword s12, s[4:5], 0x54 +; SDAG-GFX942-NEXT: s_mov_b32 s16, 0 +; SDAG-GFX942-NEXT: s_mov_b32 s5, s16 ; SDAG-GFX942-NEXT: s_waitcnt lgkmcnt(0) ; SDAG-GFX942-NEXT: s_mov_b32 s4, s3 -; SDAG-GFX942-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13] -; SDAG-GFX942-NEXT: s_mov_b32 s13, s2 +; SDAG-GFX942-NEXT: s_or_b64 s[6:7], s[4:5], s[16:17] +; SDAG-GFX942-NEXT: s_mov_b32 s17, s2 ; SDAG-GFX942-NEXT: s_mov_b32 s2, s1 -; SDAG-GFX942-NEXT: s_mov_b32 s3, s12 -; SDAG-GFX942-NEXT: s_or_b64 s[4:5], s[2:3], s[12:13] -; SDAG-GFX942-NEXT: s_mov_b32 s13, s14 +; SDAG-GFX942-NEXT: s_mov_b32 s3, s16 +; SDAG-GFX942-NEXT: s_or_b64 s[4:5], s[2:3], s[16:17] +; SDAG-GFX942-NEXT: s_mov_b32 s17, s12 ; SDAG-GFX942-NEXT: s_mov_b32 s2, s11 -; SDAG-GFX942-NEXT: s_or_b64 s[14:15], s[2:3], s[12:13] -; SDAG-GFX942-NEXT: s_mov_b32 s13, s10 +; SDAG-GFX942-NEXT: s_or_b64 s[14:15], s[2:3], s[16:17] +; SDAG-GFX942-NEXT: s_mov_b32 s17, s10 ; SDAG-GFX942-NEXT: s_mov_b32 s2, s9 -; SDAG-GFX942-NEXT: s_or_b64 s[12:13], s[2:3], s[12:13] +; SDAG-GFX942-NEXT: s_or_b64 s[12:13], s[2:3], s[16:17] ; SDAG-GFX942-NEXT: .LBB1_1: ; %load-store-loop ; SDAG-GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 -; SDAG-GFX942-NEXT: v_add_u32_e32 v1, s0, v0 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[2:5], v1, s[4:7], 0 offen -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[6:9], v1, s[4:7], 0 offen offset:16 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[10:13], v1, s[4:7], 0 offen offset:32 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[14:17], v1, s[4:7], 0 offen offset:48 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[18:21], v1, s[4:7], 
0 offen offset:64 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[22:25], v1, s[4:7], 0 offen offset:80 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[26:29], v1, s[4:7], 0 offen offset:96 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[30:33], v1, s[4:7], 0 offen offset:112 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[34:37], v1, s[4:7], 0 offen offset:128 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[38:41], v1, s[4:7], 0 offen offset:144 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[42:45], v1, s[4:7], 0 offen offset:160 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[46:49], v1, s[4:7], 0 offen offset:176 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[50:53], v1, s[4:7], 0 offen offset:192 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[54:57], v1, s[4:7], 0 offen offset:208 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[58:61], v1, s[4:7], 0 offen offset:224 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 a[0:3], v1, s[4:7], 0 offen offset:240 -; SDAG-GFX942-NEXT: v_add_u32_e32 v62, s8, v0 -; SDAG-GFX942-NEXT: v_add_co_u32_e32 v0, vcc, 0x100, v0 -; SDAG-GFX942-NEXT: s_and_b64 vcc, exec, vcc -; SDAG-GFX942-NEXT: s_waitcnt vmcnt(0) -; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v63, a3 ; Reload Reuse -; SDAG-GFX942-NEXT: scratch_store_dwordx3 off, a[0:2], off ; 12-byte Folded Spill -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[12:15], 0 offen -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[6:9], v62, s[12:15], 0 offen offset:16 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[10:13], v62, s[12:15], 0 offen offset:32 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[14:17], v62, s[12:15], 0 offen offset:48 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[18:21], v62, s[12:15], 0 offen offset:64 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[22:25], v62, s[12:15], 0 offen offset:80 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[26:29], v62, s[12:15], 0 offen offset:96 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[30:33], v62, s[12:15], 0 offen offset:112 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[34:37], v62, s[12:15], 0 offen 
offset:128 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[38:41], v62, s[12:15], 0 offen offset:144 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[42:45], v62, s[12:15], 0 offen offset:160 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[46:49], v62, s[12:15], 0 offen offset:176 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[50:53], v62, s[12:15], 0 offen offset:192 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[54:57], v62, s[12:15], 0 offen offset:208 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[58:61], v62, s[12:15], 0 offen offset:224 -; SDAG-GFX942-NEXT: scratch_load_dwordx3 v[2:4], off, off ; 12-byte Folded Reload +; SDAG-GFX942-NEXT: s_add_i32 s1, s0, s16 +; SDAG-GFX942-NEXT: v_mov_b32_e32 v60, s1 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[8:11], v60, s[4:7], 0 offen +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[4:7], v60, s[4:7], 0 offen offset:16 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[12:15], v60, s[4:7], 0 offen offset:32 +; SDAG-GFX942-NEXT: s_add_i32 s2, s8, s16 +; SDAG-GFX942-NEXT: v_mov_b32_e32 v0, s2 +; SDAG-GFX942-NEXT: s_addk_i32 s16, 0x100 +; SDAG-GFX942-NEXT: s_cmpk_lt_u32 s16, 0x100 ; SDAG-GFX942-NEXT: s_waitcnt vmcnt(0) -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[12:15], 0 offen offset:240 -; SDAG-GFX942-NEXT: s_cbranch_vccnz .LBB1_1 +; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a0, v15 ; Reload Reuse +; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a1, v14 ; Reload Reuse +; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a2, v13 ; Reload Reuse +; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a3, v12 ; Reload Reuse +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[12:15], v60, s[4:7], 0 offen offset:48 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[16:19], v60, s[4:7], 0 offen offset:64 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[20:23], v60, s[4:7], 0 offen offset:80 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[24:27], v60, s[4:7], 0 offen offset:96 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[28:31], v60, s[4:7], 0 offen offset:112 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[32:35], v60, 
s[4:7], 0 offen offset:128 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[36:39], v60, s[4:7], 0 offen offset:144 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[40:43], v60, s[4:7], 0 offen offset:160 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[44:47], v60, s[4:7], 0 offen offset:176 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[48:51], v60, s[4:7], 0 offen offset:192 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[52:55], v60, s[4:7], 0 offen offset:208 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[56:59], v60, s[4:7], 0 offen offset:224 +; SDAG-GFX942-NEXT: s_nop 0 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[60:63], v60, s[4:7], 0 offen offset:240 +; SDAG-GFX942-NEXT: s_nop 0 +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[8:11], v0, s[12:15], 0 offen +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[4:7], v0, s[12:15], 0 offen offset:16 +; SDAG-GFX942-NEXT: s_nop 1 +; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v5, a0 ; Reload Reuse +; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v4, a1 ; Reload Reuse +; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v3, a2 ; Reload Reuse +; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v2, a3 ; Reload Reuse +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v0, s[12:15], 0 offen offset:32 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[12:15], v0, s[12:15], 0 offen offset:48 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[16:19], v0, s[12:15], 0 offen offset:64 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[20:23], v0, s[12:15], 0 offen offset:80 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[24:27], v0, s[12:15], 0 offen offset:96 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[28:31], v0, s[12:15], 0 offen offset:112 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[32:35], v0, s[12:15], 0 offen offset:128 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: 
buffer_store_dwordx4 v[36:39], v0, s[12:15], 0 offen offset:144 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[40:43], v0, s[12:15], 0 offen offset:160 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[44:47], v0, s[12:15], 0 offen offset:176 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[48:51], v0, s[12:15], 0 offen offset:192 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[52:55], v0, s[12:15], 0 offen offset:208 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[56:59], v0, s[12:15], 0 offen offset:224 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[60:63], v0, s[12:15], 0 offen offset:240 +; SDAG-GFX942-NEXT: s_cbranch_scc1 .LBB1_1 ; SDAG-GFX942-NEXT: ; %bb.2: ; %memcpy-split ; SDAG-GFX942-NEXT: s_endpgm ; @@ -852,84 +873,87 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp ; SDAG-GFX1100: ; %bb.0: ; SDAG-GFX1100-NEXT: s_clause 0x3 ; SDAG-GFX1100-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; SDAG-GFX1100-NEXT: s_load_b32 s13, s[4:5], 0x34 +; SDAG-GFX1100-NEXT: s_load_b32 s17, s[4:5], 0x34 ; SDAG-GFX1100-NEXT: s_load_b128 s[8:11], s[4:5], 0x44 ; SDAG-GFX1100-NEXT: s_load_b32 s18, s[4:5], 0x54 -; SDAG-GFX1100-NEXT: s_mov_b32 s12, 0 -; SDAG-GFX1100-NEXT: v_mov_b32_e32 v0, 0 -; SDAG-GFX1100-NEXT: s_mov_b32 s5, s12 -; SDAG-GFX1100-NEXT: s_mov_b32 s15, s12 -; SDAG-GFX1100-NEXT: s_mov_b32 s17, s12 +; SDAG-GFX1100-NEXT: s_mov_b32 s16, 0 +; SDAG-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; SDAG-GFX1100-NEXT: s_mov_b32 s5, s16 +; SDAG-GFX1100-NEXT: s_mov_b32 s13, s16 +; SDAG-GFX1100-NEXT: s_mov_b32 s15, s16 ; SDAG-GFX1100-NEXT: s_waitcnt lgkmcnt(0) ; SDAG-GFX1100-NEXT: s_mov_b32 s4, s3 -; SDAG-GFX1100-NEXT: s_mov_b32 s14, s1 -; SDAG-GFX1100-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13] -; SDAG-GFX1100-NEXT: s_mov_b32 s13, s2 
-; SDAG-GFX1100-NEXT: s_mov_b32 s16, s11 -; SDAG-GFX1100-NEXT: s_or_b64 s[4:5], s[14:15], s[12:13] -; SDAG-GFX1100-NEXT: s_mov_b32 s13, s18 +; SDAG-GFX1100-NEXT: s_mov_b32 s12, s1 +; SDAG-GFX1100-NEXT: s_or_b64 s[6:7], s[4:5], s[16:17] +; SDAG-GFX1100-NEXT: s_mov_b32 s17, s2 +; SDAG-GFX1100-NEXT: s_mov_b32 s14, s11 +; SDAG-GFX1100-NEXT: s_or_b64 s[4:5], s[12:13], s[16:17] +; SDAG-GFX1100-NEXT: s_mov_b32 s17, s18 ; SDAG-GFX1100-NEXT: s_mov_b32 s2, s9 -; SDAG-GFX1100-NEXT: s_or_b64 s[14:15], s[16:17], s[12:13] -; SDAG-GFX1100-NEXT: s_mov_b32 s13, s10 -; SDAG-GFX1100-NEXT: s_mov_b32 s3, s12 +; SDAG-GFX1100-NEXT: s_or_b64 s[14:15], s[14:15], s[16:17] +; SDAG-GFX1100-NEXT: s_mov_b32 s17, s10 +; SDAG-GFX1100-NEXT: s_mov_b32 s3, s16 ; SDAG-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; SDAG-GFX1100-NEXT: s_or_b64 s[12:13], s[2:3], s[12:13] +; SDAG-GFX1100-NEXT: s_or_b64 s[12:13], s[2:3], s[16:17] ; SDAG-GFX1100-NEXT: .LBB1_1: ; %load-store-loop ; SDAG-GFX1100-NEXT: ; =>This Inner Loop Header: Depth=1 -; SDAG-GFX1100-NEXT: v_add_nc_u32_e32 v61, s0, v0 -; SDAG-GFX1100-NEXT: v_add_nc_u32_e32 v65, s8, v0 -; SDAG-GFX1100-NEXT: v_add_co_u32 v0, s1, 0x100, v0 -; SDAG-GFX1100-NEXT: s_and_b32 vcc_lo, exec_lo, s1 +; SDAG-GFX1100-NEXT: s_add_i32 s1, s0, s16 +; SDAG-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; SDAG-GFX1100-NEXT: v_mov_b32_e32 v60, s1 +; SDAG-GFX1100-NEXT: s_add_i32 s1, s8, s16 +; SDAG-GFX1100-NEXT: s_addk_i32 s16, 0x100 +; SDAG-GFX1100-NEXT: v_mov_b32_e32 v64, s1 +; SDAG-GFX1100-NEXT: s_cmpk_lt_u32 s16, 0x100 ; SDAG-GFX1100-NEXT: s_clause 0xf -; SDAG-GFX1100-NEXT: buffer_load_b128 v[1:4], v61, s[4:7], 0 offen -; SDAG-GFX1100-NEXT: buffer_load_b128 v[5:8], v61, s[4:7], 0 offen offset:16 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[9:12], v61, s[4:7], 0 offen offset:32 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[13:16], v61, s[4:7], 0 offen offset:48 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[17:20], v61, s[4:7], 0 offen offset:64 -; SDAG-GFX1100-NEXT: 
buffer_load_b128 v[21:24], v61, s[4:7], 0 offen offset:80 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[25:28], v61, s[4:7], 0 offen offset:96 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[29:32], v61, s[4:7], 0 offen offset:112 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[33:36], v61, s[4:7], 0 offen offset:128 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[37:40], v61, s[4:7], 0 offen offset:144 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[41:44], v61, s[4:7], 0 offen offset:160 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[45:48], v61, s[4:7], 0 offen offset:176 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[49:52], v61, s[4:7], 0 offen offset:192 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[53:56], v61, s[4:7], 0 offen offset:208 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[57:60], v61, s[4:7], 0 offen offset:224 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[61:64], v61, s[4:7], 0 offen offset:240 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[0:3], v60, s[4:7], 0 offen +; SDAG-GFX1100-NEXT: buffer_load_b128 v[4:7], v60, s[4:7], 0 offen offset:16 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[8:11], v60, s[4:7], 0 offen offset:32 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[12:15], v60, s[4:7], 0 offen offset:48 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[16:19], v60, s[4:7], 0 offen offset:64 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[20:23], v60, s[4:7], 0 offen offset:80 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[24:27], v60, s[4:7], 0 offen offset:96 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[28:31], v60, s[4:7], 0 offen offset:112 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[32:35], v60, s[4:7], 0 offen offset:128 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[36:39], v60, s[4:7], 0 offen offset:144 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[40:43], v60, s[4:7], 0 offen offset:160 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[44:47], v60, s[4:7], 0 offen offset:176 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[48:51], v60, s[4:7], 0 offen offset:192 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[52:55], v60, s[4:7], 0 offen offset:208 +; 
SDAG-GFX1100-NEXT: buffer_load_b128 v[56:59], v60, s[4:7], 0 offen offset:224 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[60:63], v60, s[4:7], 0 offen offset:240 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(15) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[1:4], v65, s[12:15], 0 offen +; SDAG-GFX1100-NEXT: buffer_store_b128 v[0:3], v64, s[12:15], 0 offen ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(14) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[5:8], v65, s[12:15], 0 offen offset:16 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[4:7], v64, s[12:15], 0 offen offset:16 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(13) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[9:12], v65, s[12:15], 0 offen offset:32 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[8:11], v64, s[12:15], 0 offen offset:32 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(12) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[13:16], v65, s[12:15], 0 offen offset:48 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[12:15], v64, s[12:15], 0 offen offset:48 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(11) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[17:20], v65, s[12:15], 0 offen offset:64 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[16:19], v64, s[12:15], 0 offen offset:64 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(10) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[21:24], v65, s[12:15], 0 offen offset:80 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[20:23], v64, s[12:15], 0 offen offset:80 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(9) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[25:28], v65, s[12:15], 0 offen offset:96 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[24:27], v64, s[12:15], 0 offen offset:96 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(8) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[29:32], v65, s[12:15], 0 offen offset:112 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[28:31], v64, s[12:15], 0 offen offset:112 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(7) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[33:36], v65, s[12:15], 0 offen offset:128 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[32:35], v64, s[12:15], 0 offen offset:128 ; 
SDAG-GFX1100-NEXT: s_waitcnt vmcnt(6) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[37:40], v65, s[12:15], 0 offen offset:144 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[36:39], v64, s[12:15], 0 offen offset:144 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(5) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[41:44], v65, s[12:15], 0 offen offset:160 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[40:43], v64, s[12:15], 0 offen offset:160 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(4) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[45:48], v65, s[12:15], 0 offen offset:176 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[44:47], v64, s[12:15], 0 offen offset:176 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(3) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[49:52], v65, s[12:15], 0 offen offset:192 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[48:51], v64, s[12:15], 0 offen offset:192 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(2) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[53:56], v65, s[12:15], 0 offen offset:208 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[52:55], v64, s[12:15], 0 offen offset:208 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(1) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[57:60], v65, s[12:15], 0 offen offset:224 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[56:59], v64, s[12:15], 0 offen offset:224 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(0) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[61:64], v65, s[12:15], 0 offen offset:240 -; SDAG-GFX1100-NEXT: s_cbranch_vccnz .LBB1_1 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[60:63], v64, s[12:15], 0 offen offset:240 +; SDAG-GFX1100-NEXT: s_cbranch_scc1 .LBB1_1 ; SDAG-GFX1100-NEXT: ; %bb.2: ; %memcpy-split ; SDAG-GFX1100-NEXT: s_endpgm ; @@ -957,52 +981,50 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp ; GISEL-GFX942-NEXT: s_mov_b32 s2, s7 ; GISEL-GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GISEL-GFX942-NEXT: s_or_b64 s[6:7], s[6:7], s[2:3] -; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, s16 +; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, 0x100 +; GISEL-GFX942-NEXT: v_mov_b32_e32 v1, s16 ; 
GISEL-GFX942-NEXT: .LBB1_1: ; %load-store-loop ; GISEL-GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 -; GISEL-GFX942-NEXT: v_add_u32_e32 v1, s0, v0 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[2:5], v1, s[8:11], 0 offen -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[6:9], v1, s[8:11], 0 offen offset:16 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[10:13], v1, s[8:11], 0 offen offset:32 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[14:17], v1, s[8:11], 0 offen offset:48 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[18:21], v1, s[8:11], 0 offen offset:64 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[22:25], v1, s[8:11], 0 offen offset:80 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[26:29], v1, s[8:11], 0 offen offset:96 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[30:33], v1, s[8:11], 0 offen offset:112 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[34:37], v1, s[8:11], 0 offen offset:128 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[38:41], v1, s[8:11], 0 offen offset:144 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[42:45], v1, s[8:11], 0 offen offset:160 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[46:49], v1, s[8:11], 0 offen offset:176 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[50:53], v1, s[8:11], 0 offen offset:192 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[54:57], v1, s[8:11], 0 offen offset:208 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[58:61], v1, s[8:11], 0 offen offset:224 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 a[0:3], v1, s[8:11], 0 offen offset:240 -; GISEL-GFX942-NEXT: v_add_u32_e32 v62, s12, v0 -; GISEL-GFX942-NEXT: v_add_co_u32_e32 v0, vcc, 0x100, v0 -; GISEL-GFX942-NEXT: s_xor_b64 s[2:3], vcc, -1 -; GISEL-GFX942-NEXT: s_xor_b64 s[2:3], s[2:3], -1 -; GISEL-GFX942-NEXT: s_and_b64 vcc, s[2:3], exec +; GISEL-GFX942-NEXT: v_add_u32_e32 v62, s0, v1 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[2:5], v62, s[8:11], 0 offen +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[6:9], v62, s[8:11], 0 offen offset:16 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[10:13], v62, s[8:11], 0 
offen offset:32 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[14:17], v62, s[8:11], 0 offen offset:48 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[18:21], v62, s[8:11], 0 offen offset:64 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[22:25], v62, s[8:11], 0 offen offset:80 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[26:29], v62, s[8:11], 0 offen offset:96 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[30:33], v62, s[8:11], 0 offen offset:112 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[34:37], v62, s[8:11], 0 offen offset:128 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[38:41], v62, s[8:11], 0 offen offset:144 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[42:45], v62, s[8:11], 0 offen offset:160 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[46:49], v62, s[8:11], 0 offen offset:176 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[50:53], v62, s[8:11], 0 offen offset:192 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[54:57], v62, s[8:11], 0 offen offset:208 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[58:61], v62, s[8:11], 0 offen offset:224 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 a[0:3], v62, s[8:11], 0 offen offset:240 +; GISEL-GFX942-NEXT: v_add_u32_e32 v63, s12, v1 +; GISEL-GFX942-NEXT: v_add_u32_e32 v1, 0x100, v1 +; GISEL-GFX942-NEXT: v_cmp_lt_u32_e32 vcc, v1, v0 ; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0) -; GISEL-GFX942-NEXT: v_accvgpr_read_b32 v63, a3 ; Reload Reuse -; GISEL-GFX942-NEXT: scratch_store_dwordx3 off, a[0:2], off ; 12-byte Folded Spill -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[4:7], 0 offen -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[6:9], v62, s[4:7], 0 offen offset:16 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[10:13], v62, s[4:7], 0 offen offset:32 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[14:17], v62, s[4:7], 0 offen offset:48 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[18:21], v62, s[4:7], 0 offen offset:64 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[22:25], v62, s[4:7], 0 offen offset:80 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[26:29], 
v62, s[4:7], 0 offen offset:96 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[30:33], v62, s[4:7], 0 offen offset:112 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[34:37], v62, s[4:7], 0 offen offset:128 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[38:41], v62, s[4:7], 0 offen offset:144 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[42:45], v62, s[4:7], 0 offen offset:160 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[46:49], v62, s[4:7], 0 offen offset:176 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[50:53], v62, s[4:7], 0 offen offset:192 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[54:57], v62, s[4:7], 0 offen offset:208 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[58:61], v62, s[4:7], 0 offen offset:224 -; GISEL-GFX942-NEXT: scratch_load_dwordx3 v[2:4], off, off ; 12-byte Folded Reload +; GISEL-GFX942-NEXT: scratch_store_dwordx4 off, a[0:3], off ; 16-byte Folded Spill +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v63, s[4:7], 0 offen +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[6:9], v63, s[4:7], 0 offen offset:16 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[10:13], v63, s[4:7], 0 offen offset:32 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[14:17], v63, s[4:7], 0 offen offset:48 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[18:21], v63, s[4:7], 0 offen offset:64 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[22:25], v63, s[4:7], 0 offen offset:80 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[26:29], v63, s[4:7], 0 offen offset:96 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[30:33], v63, s[4:7], 0 offen offset:112 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[34:37], v63, s[4:7], 0 offen offset:128 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[38:41], v63, s[4:7], 0 offen offset:144 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[42:45], v63, s[4:7], 0 offen offset:160 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[46:49], v63, s[4:7], 0 offen offset:176 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[50:53], v63, s[4:7], 0 offen offset:192 +; GISEL-GFX942-NEXT: 
buffer_store_dwordx4 v[54:57], v63, s[4:7], 0 offen offset:208 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[58:61], v63, s[4:7], 0 offen offset:224 +; GISEL-GFX942-NEXT: scratch_load_dwordx4 v[2:5], off, off ; 16-byte Folded Reload ; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0) -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[4:7], 0 offen offset:240 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v63, s[4:7], 0 offen offset:240 ; GISEL-GFX942-NEXT: s_cbranch_vccnz .LBB1_1 ; GISEL-GFX942-NEXT: ; %bb.2: ; %memcpy-split ; GISEL-GFX942-NEXT: s_endpgm @@ -1037,8 +1059,7 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp ; GISEL-GFX1100-NEXT: ; =>This Inner Loop Header: Depth=1 ; GISEL-GFX1100-NEXT: v_add_nc_u32_e32 v61, s0, v0 ; GISEL-GFX1100-NEXT: v_add_nc_u32_e32 v65, s8, v0 -; GISEL-GFX1100-NEXT: v_add_co_u32 v0, s1, 0x100, v0 -; GISEL-GFX1100-NEXT: s_xor_b32 s1, s1, -1 +; GISEL-GFX1100-NEXT: v_add_nc_u32_e32 v0, 0x100, v0 ; GISEL-GFX1100-NEXT: s_clause 0xf ; GISEL-GFX1100-NEXT: buffer_load_b128 v[1:4], v61, s[4:7], 0 offen ; GISEL-GFX1100-NEXT: buffer_load_b128 v[5:8], v61, s[4:7], 0 offen offset:16 @@ -1056,7 +1077,6 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp ; GISEL-GFX1100-NEXT: buffer_load_b128 v[53:56], v61, s[4:7], 0 offen offset:208 ; GISEL-GFX1100-NEXT: buffer_load_b128 v[57:60], v61, s[4:7], 0 offen offset:224 ; GISEL-GFX1100-NEXT: buffer_load_b128 v[61:64], v61, s[4:7], 0 offen offset:240 -; GISEL-GFX1100-NEXT: s_xor_b32 s1, s1, -1 ; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(15) ; GISEL-GFX1100-NEXT: buffer_store_b128 v[1:4], v65, s[12:15], 0 offen ; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(14) @@ -1089,7 +1109,7 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp ; GISEL-GFX1100-NEXT: buffer_store_b128 v[57:60], v65, s[12:15], 0 offen offset:224 ; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(0) ; GISEL-GFX1100-NEXT: buffer_store_b128 v[61:64], v65, s[12:15], 0 
offen offset:240 -; GISEL-GFX1100-NEXT: s_and_b32 vcc_lo, exec_lo, s1 +; GISEL-GFX1100-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x100, v0 ; GISEL-GFX1100-NEXT: s_cbranch_vccnz .LBB1_1 ; GISEL-GFX1100-NEXT: ; %bb.2: ; %memcpy-split ; GISEL-GFX1100-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll index 5134159..0fc54ae 100644 --- a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll +++ b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll @@ -619,43 +619,43 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: s_mov_b64 s[8:9], 0 ; GISEL-NEXT: v_ashrrev_i32_e32 v18, 31, v7 ; GISEL-NEXT: v_ashrrev_i32_e32 v19, 31, v15 -; GISEL-NEXT: v_mov_b32_e32 v10, 0x7f -; GISEL-NEXT: v_mov_b32_e32 v11, 0 +; GISEL-NEXT: v_mov_b32_e32 v16, 0x7f +; GISEL-NEXT: v_mov_b32_e32 v17, 0 ; GISEL-NEXT: v_xor_b32_e32 v0, v18, v4 ; GISEL-NEXT: v_xor_b32_e32 v1, v18, v5 ; GISEL-NEXT: v_xor_b32_e32 v2, v18, v6 ; GISEL-NEXT: v_xor_b32_e32 v3, v18, v7 ; GISEL-NEXT: v_xor_b32_e32 v4, v19, v12 ; GISEL-NEXT: v_xor_b32_e32 v5, v19, v13 -; GISEL-NEXT: v_xor_b32_e32 v14, v19, v14 -; GISEL-NEXT: v_xor_b32_e32 v15, v19, v15 +; GISEL-NEXT: v_xor_b32_e32 v12, v19, v14 +; GISEL-NEXT: v_xor_b32_e32 v13, v19, v15 ; GISEL-NEXT: v_sub_i32_e32 v6, vcc, v0, v18 ; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v1, v18, vcc ; GISEL-NEXT: v_sub_i32_e64 v20, s[4:5], v4, v19 ; GISEL-NEXT: v_subb_u32_e64 v21, s[4:5], v5, v19, s[4:5] -; GISEL-NEXT: v_subb_u32_e32 v12, vcc, v2, v18, vcc -; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v3, v18, vcc -; GISEL-NEXT: v_subb_u32_e64 v4, vcc, v14, v19, s[4:5] -; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v15, v19, vcc -; GISEL-NEXT: v_ffbh_u32_e32 v14, v21 -; GISEL-NEXT: v_ffbh_u32_e32 v15, v20 -; GISEL-NEXT: v_ffbh_u32_e32 v16, v7 -; GISEL-NEXT: v_ffbh_u32_e32 v17, v6 +; GISEL-NEXT: v_subb_u32_e32 v10, vcc, v2, v18, vcc +; GISEL-NEXT: v_subb_u32_e32 v11, vcc, v3, v18, vcc +; GISEL-NEXT: v_subb_u32_e64 v4, vcc, v12, v19, s[4:5] +; GISEL-NEXT: 
v_subb_u32_e32 v5, vcc, v13, v19, vcc +; GISEL-NEXT: v_ffbh_u32_e32 v12, v21 +; GISEL-NEXT: v_ffbh_u32_e32 v13, v20 +; GISEL-NEXT: v_ffbh_u32_e32 v14, v7 +; GISEL-NEXT: v_ffbh_u32_e32 v15, v6 ; GISEL-NEXT: v_or_b32_e32 v0, v20, v4 ; GISEL-NEXT: v_or_b32_e32 v1, v21, v5 -; GISEL-NEXT: v_or_b32_e32 v2, v6, v12 -; GISEL-NEXT: v_or_b32_e32 v3, v7, v13 -; GISEL-NEXT: v_add_i32_e32 v15, vcc, 32, v15 +; GISEL-NEXT: v_or_b32_e32 v2, v6, v10 +; GISEL-NEXT: v_or_b32_e32 v3, v7, v11 +; GISEL-NEXT: v_add_i32_e32 v13, vcc, 32, v13 ; GISEL-NEXT: v_ffbh_u32_e32 v26, v5 ; GISEL-NEXT: v_ffbh_u32_e32 v27, v4 -; GISEL-NEXT: v_add_i32_e32 v17, vcc, 32, v17 -; GISEL-NEXT: v_ffbh_u32_e32 v28, v13 -; GISEL-NEXT: v_ffbh_u32_e32 v29, v12 +; GISEL-NEXT: v_add_i32_e32 v15, vcc, 32, v15 +; GISEL-NEXT: v_ffbh_u32_e32 v28, v11 +; GISEL-NEXT: v_ffbh_u32_e32 v29, v10 ; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] ; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[2:3] -; GISEL-NEXT: v_min_u32_e32 v0, v14, v15 +; GISEL-NEXT: v_min_u32_e32 v0, v12, v13 ; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v27 -; GISEL-NEXT: v_min_u32_e32 v2, v16, v17 +; GISEL-NEXT: v_min_u32_e32 v2, v14, v15 ; GISEL-NEXT: v_add_i32_e64 v3, s[6:7], 32, v29 ; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 64, v0 ; GISEL-NEXT: v_min_u32_e32 v1, v26, v1 @@ -665,32 +665,32 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: v_cndmask_b32_e64 v14, 0, 1, s[4:5] ; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5] ; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[12:13] +; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11] ; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc ; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v0, v1 ; GISEL-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, vcc ; GISEL-NEXT: v_subb_u32_e64 v0, s[4:5], 0, 0, s[4:5] ; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, s[4:5] -; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[10:11] +; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[16:17] ; 
GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc -; GISEL-NEXT: v_xor_b32_e32 v10, 0x7f, v2 +; GISEL-NEXT: v_xor_b32_e32 v12, 0x7f, v2 ; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[0:1] ; GISEL-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc -; GISEL-NEXT: v_or_b32_e32 v10, v10, v0 -; GISEL-NEXT: v_or_b32_e32 v11, v3, v1 +; GISEL-NEXT: v_or_b32_e32 v12, v12, v0 +; GISEL-NEXT: v_or_b32_e32 v13, v3, v1 ; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] ; GISEL-NEXT: v_cndmask_b32_e32 v15, v16, v15, vcc -; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11] -; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc -; GISEL-NEXT: v_or_b32_e32 v11, v14, v15 -; GISEL-NEXT: v_and_b32_e32 v14, 1, v11 -; GISEL-NEXT: v_or_b32_e32 v10, v11, v10 +; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[12:13] +; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc +; GISEL-NEXT: v_or_b32_e32 v13, v14, v15 +; GISEL-NEXT: v_and_b32_e32 v14, 1, v13 +; GISEL-NEXT: v_or_b32_e32 v12, v13, v12 ; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14 ; GISEL-NEXT: v_cndmask_b32_e64 v14, v6, 0, vcc -; GISEL-NEXT: v_and_b32_e32 v16, 1, v10 +; GISEL-NEXT: v_and_b32_e32 v16, 1, v12 ; GISEL-NEXT: v_cndmask_b32_e64 v15, v7, 0, vcc -; GISEL-NEXT: v_cndmask_b32_e64 v10, v12, 0, vcc -; GISEL-NEXT: v_cndmask_b32_e64 v11, v13, 0, vcc +; GISEL-NEXT: v_cndmask_b32_e64 v12, v10, 0, vcc +; GISEL-NEXT: v_cndmask_b32_e64 v13, v11, 0, vcc ; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16 ; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1 ; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5] @@ -703,22 +703,22 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: v_addc_u32_e64 v28, vcc, 0, v0, s[4:5] ; GISEL-NEXT: v_addc_u32_e32 v29, vcc, 0, v1, vcc ; GISEL-NEXT: v_add_i32_e64 v14, s[4:5], v30, v2 -; GISEL-NEXT: v_sub_i32_e64 v10, s[4:5], 64, v30 +; GISEL-NEXT: v_sub_i32_e64 v12, s[4:5], 64, v30 ; GISEL-NEXT: v_lshl_b64 v[0:1], v[6:7], v30 -; GISEL-NEXT: v_lshl_b64 v[2:3], v[12:13], v30 +; GISEL-NEXT: v_lshl_b64 v[2:3], v[10:11], v30 ; GISEL-NEXT: s_xor_b64 s[4:5], 
vcc, -1 -; GISEL-NEXT: v_lshr_b64 v[10:11], v[6:7], v10 +; GISEL-NEXT: v_lshr_b64 v[12:13], v[6:7], v12 ; GISEL-NEXT: v_lshl_b64 v[16:17], v[6:7], v14 ; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v30 ; GISEL-NEXT: v_cndmask_b32_e32 v14, 0, v0, vcc ; GISEL-NEXT: v_cndmask_b32_e32 v15, 0, v1, vcc -; GISEL-NEXT: v_or_b32_e32 v0, v10, v2 -; GISEL-NEXT: v_or_b32_e32 v1, v11, v3 +; GISEL-NEXT: v_or_b32_e32 v0, v12, v2 +; GISEL-NEXT: v_or_b32_e32 v1, v13, v3 ; GISEL-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc ; GISEL-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc ; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30 -; GISEL-NEXT: v_cndmask_b32_e32 v10, v0, v12, vcc -; GISEL-NEXT: v_cndmask_b32_e32 v11, v1, v13, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v12, v0, v10, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v13, v1, v11, vcc ; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9] ; GISEL-NEXT: v_mov_b32_e32 v0, s8 ; GISEL-NEXT: v_mov_b32_e32 v1, s9 @@ -730,26 +730,26 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: ; %bb.8: ; %udiv-preheader ; GISEL-NEXT: v_add_i32_e32 v32, vcc, 0xffffffc0, v26 ; GISEL-NEXT: v_sub_i32_e32 v16, vcc, 64, v26 -; GISEL-NEXT: v_lshr_b64 v[0:1], v[12:13], v26 +; GISEL-NEXT: v_lshr_b64 v[0:1], v[10:11], v26 ; GISEL-NEXT: v_lshr_b64 v[2:3], v[6:7], v26 ; GISEL-NEXT: s_mov_b64 s[4:5], 0 ; GISEL-NEXT: v_add_i32_e32 v30, vcc, -1, v20 ; GISEL-NEXT: v_addc_u32_e32 v31, vcc, -1, v21, vcc -; GISEL-NEXT: v_lshl_b64 v[16:17], v[12:13], v16 -; GISEL-NEXT: v_lshr_b64 v[12:13], v[12:13], v32 +; GISEL-NEXT: v_lshl_b64 v[16:17], v[10:11], v16 +; GISEL-NEXT: v_lshr_b64 v[10:11], v[10:11], v32 ; GISEL-NEXT: v_addc_u32_e32 v32, vcc, -1, v4, vcc ; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v5, vcc ; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5] ; GISEL-NEXT: v_or_b32_e32 v2, v2, v16 ; GISEL-NEXT: v_or_b32_e32 v3, v3, v17 ; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v26 -; GISEL-NEXT: v_cndmask_b32_e32 v2, v12, v2, vcc -; GISEL-NEXT: v_cndmask_b32_e32 v3, v13, v3, vcc +; GISEL-NEXT: 
v_cndmask_b32_e32 v2, v10, v2, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc ; GISEL-NEXT: v_cndmask_b32_e32 v16, 0, v0, vcc ; GISEL-NEXT: v_cndmask_b32_e32 v17, 0, v1, vcc ; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v26 -; GISEL-NEXT: v_cndmask_b32_e32 v12, v2, v6, vcc -; GISEL-NEXT: v_cndmask_b32_e32 v13, v3, v7, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v10, v2, v6, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v11, v3, v7, vcc ; GISEL-NEXT: v_mov_b32_e32 v7, 0 ; GISEL-NEXT: v_mov_b32_e32 v0, s4 ; GISEL-NEXT: v_mov_b32_e32 v1, s5 @@ -757,20 +757,20 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: v_mov_b32_e32 v3, s7 ; GISEL-NEXT: .LBB0_9: ; %udiv-do-while ; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 -; GISEL-NEXT: v_lshl_b64 v[2:3], v[12:13], 1 +; GISEL-NEXT: v_lshl_b64 v[2:3], v[10:11], 1 ; GISEL-NEXT: v_lshl_b64 v[16:17], v[16:17], 1 -; GISEL-NEXT: v_lshrrev_b32_e32 v6, 31, v13 -; GISEL-NEXT: v_lshrrev_b32_e32 v34, 31, v11 -; GISEL-NEXT: v_lshl_b64 v[12:13], v[14:15], 1 -; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1 +; GISEL-NEXT: v_lshrrev_b32_e32 v6, 31, v11 +; GISEL-NEXT: v_lshrrev_b32_e32 v34, 31, v13 +; GISEL-NEXT: v_lshl_b64 v[10:11], v[14:15], 1 +; GISEL-NEXT: v_lshl_b64 v[12:13], v[12:13], 1 ; GISEL-NEXT: v_lshrrev_b32_e32 v14, 31, v15 ; GISEL-NEXT: v_add_i32_e32 v26, vcc, -1, v26 ; GISEL-NEXT: v_addc_u32_e32 v27, vcc, -1, v27, vcc ; GISEL-NEXT: v_or_b32_e32 v16, v16, v6 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v34 -; GISEL-NEXT: v_or_b32_e32 v10, v10, v14 -; GISEL-NEXT: v_or_b32_e32 v14, v0, v12 -; GISEL-NEXT: v_or_b32_e32 v15, v1, v13 +; GISEL-NEXT: v_or_b32_e32 v12, v12, v14 +; GISEL-NEXT: v_or_b32_e32 v14, v0, v10 +; GISEL-NEXT: v_or_b32_e32 v15, v1, v11 ; GISEL-NEXT: v_addc_u32_e32 v28, vcc, -1, v28, vcc ; GISEL-NEXT: v_addc_u32_e32 v29, vcc, -1, v29, vcc ; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v30, v2 @@ -783,14 +783,14 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: 
v_ashrrev_i32_e32 v0, 31, v6 ; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5] ; GISEL-NEXT: v_and_b32_e32 v6, 1, v0 -; GISEL-NEXT: v_and_b32_e32 v12, v0, v20 -; GISEL-NEXT: v_and_b32_e32 v13, v0, v21 +; GISEL-NEXT: v_and_b32_e32 v10, v0, v20 +; GISEL-NEXT: v_and_b32_e32 v11, v0, v21 ; GISEL-NEXT: v_and_b32_e32 v34, v0, v4 ; GISEL-NEXT: v_and_b32_e32 v35, v0, v5 ; GISEL-NEXT: v_mov_b32_e32 v0, v6 ; GISEL-NEXT: v_mov_b32_e32 v1, v7 -; GISEL-NEXT: v_sub_i32_e32 v12, vcc, v2, v12 -; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v3, v13, vcc +; GISEL-NEXT: v_sub_i32_e32 v10, vcc, v2, v10 +; GISEL-NEXT: v_subb_u32_e32 v11, vcc, v3, v11, vcc ; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v16, v34, vcc ; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v17, v35, vcc ; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5] @@ -800,9 +800,9 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: .LBB0_11: ; %Flow11 ; GISEL-NEXT: s_or_b64 exec, exec, s[8:9] ; GISEL-NEXT: v_lshl_b64 v[2:3], v[14:15], 1 -; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1 +; GISEL-NEXT: v_lshl_b64 v[12:13], v[12:13], 1 ; GISEL-NEXT: v_lshrrev_b32_e32 v4, 31, v15 -; GISEL-NEXT: v_or_b32_e32 v10, v10, v4 +; GISEL-NEXT: v_or_b32_e32 v12, v12, v4 ; GISEL-NEXT: v_or_b32_e32 v14, v0, v2 ; GISEL-NEXT: v_or_b32_e32 v15, v1, v3 ; GISEL-NEXT: .LBB0_12: ; %Flow12 @@ -815,8 +815,8 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: v_xor_b32_e32 v6, v9, v3 ; GISEL-NEXT: v_xor_b32_e32 v4, v14, v7 ; GISEL-NEXT: v_xor_b32_e32 v5, v15, v7 -; GISEL-NEXT: v_xor_b32_e32 v8, v10, v7 -; GISEL-NEXT: v_xor_b32_e32 v9, v11, v7 +; GISEL-NEXT: v_xor_b32_e32 v8, v12, v7 +; GISEL-NEXT: v_xor_b32_e32 v9, v13, v7 ; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v3 ; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc ; GISEL-NEXT: v_sub_i32_e64 v4, s[4:5], v4, v7 diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll index e7af746..e042157 100644 --- 
a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll +++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll @@ -20,7 +20,8 @@ define i128 @fptosi_f64_to_i128(double %x) { ; SDAG-NEXT: s_cbranch_execz .LBB0_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc @@ -386,7 +387,8 @@ define i128 @fptoui_f64_to_i128(double %x) { ; SDAG-NEXT: s_cbranch_execz .LBB1_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc @@ -749,9 +751,10 @@ define i128 @fptosi_f32_to_i128(float %x) { ; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc ; SDAG-NEXT: s_cbranch_execz .LBB2_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end -; SDAG-NEXT: v_mov_b32_e32 v6, 0 ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_mov_b32_e32 v6, 0 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc @@ -1100,9 +1103,10 @@ define i128 @fptoui_f32_to_i128(float %x) { ; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc ; SDAG-NEXT: s_cbranch_execz .LBB3_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end -; SDAG-NEXT: v_mov_b32_e32 v6, 0 ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; 
SDAG-NEXT: v_mov_b32_e32 v6, 0 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc @@ -1489,9 +1493,10 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) { ; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc ; SDAG-NEXT: s_cbranch_execz .LBB6_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end -; SDAG-NEXT: v_mov_b32_e32 v6, 0 ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_mov_b32_e32 v6, 0 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc @@ -1836,9 +1841,10 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) { ; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc ; SDAG-NEXT: s_cbranch_execz .LBB7_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end -; SDAG-NEXT: v_mov_b32_e32 v6, 0 ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_mov_b32_e32 v6, 0 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll index 05403f0..a50791e 100644 --- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll +++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll @@ -7575,15 +7575,13 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; 
GFX7-NEXT: v_mov_b32_e32 v5, v3 ; GFX7-NEXT: v_mov_b32_e32 v4, v2 ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: .LBB38_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5] ; GFX7-NEXT: v_mov_b32_e32 v0, v8 ; GFX7-NEXT: v_mov_b32_e32 v1, v9 @@ -7593,7 +7591,9 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_mov_b32_e32 v11, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB38_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7609,15 +7609,13 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; GFX6-NEXT: v_mov_b32_e32 v5, v3 ; GFX6-NEXT: v_mov_b32_e32 v4, v2 ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: .LBB38_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v0, v8 @@ -7628,7 +7626,9 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 ; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_mov_b32_e32 v11, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: 
s_cbranch_execnz .LBB38_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7809,15 +7809,13 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040 +; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 ; GFX7-NEXT: v_mov_b32_e32 v4, v2 ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: .LBB39_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5] ; GFX7-NEXT: v_mov_b32_e32 v0, v8 ; GFX7-NEXT: v_mov_b32_e32 v1, v9 @@ -7827,7 +7825,9 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_mov_b32_e32 v11, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB39_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7843,15 +7843,13 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040 +; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040 ; GFX6-NEXT: v_mov_b32_e32 v5, v3 ; GFX6-NEXT: v_mov_b32_e32 v4, v2 ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: .LBB39_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5] ; 
GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v0, v8 @@ -7862,7 +7860,9 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 ; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_mov_b32_e32 v11, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB39_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8039,34 +8039,32 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_neg__amdgpu_no_fine_g ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: s_movk_i32 s4, 0xf800 -; GFX7-NEXT: v_mov_b32_e32 v7, v1 -; GFX7-NEXT: v_mov_b32_e32 v6, v0 ; GFX7-NEXT: s_mov_b32 s5, -1 ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s6, 0 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 -; GFX7-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6 +; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64 +; GFX7-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 ; GFX7-NEXT: v_mov_b32_e32 v4, v2 -; GFX7-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc +; GFX7-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 ; GFX7-NEXT: .LBB40_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 -; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v8 -; GFX7-NEXT: v_mov_b32_e32 v1, v9 -; GFX7-NEXT: v_mov_b32_e32 v2, v10 -; GFX7-NEXT: v_mov_b32_e32 v3, v11 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc +; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v0, v6 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 +; GFX7-NEXT: v_mov_b32_e32 
v2, v8 +; GFX7-NEXT: v_mov_b32_e32 v3, v9 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7-NEXT: v_mov_b32_e32 v9, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v8, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB40_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8077,35 +8075,33 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_neg__amdgpu_no_fine_g ; GFX6: ; %bb.0: ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX6-NEXT: s_movk_i32 s4, 0xf800 -; GFX6-NEXT: v_mov_b32_e32 v7, v1 -; GFX6-NEXT: v_mov_b32_e32 v6, v0 ; GFX6-NEXT: s_mov_b32 s5, -1 ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s6, 0 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 -; GFX6-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6 +; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64 +; GFX6-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0 ; GFX6-NEXT: v_mov_b32_e32 v5, v3 ; GFX6-NEXT: v_mov_b32_e32 v4, v2 -; GFX6-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc +; GFX6-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 ; GFX6-NEXT: .LBB40_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 -; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5] +; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, v8 -; GFX6-NEXT: v_mov_b32_e32 v1, v9 -; GFX6-NEXT: v_mov_b32_e32 v2, v10 -; GFX6-NEXT: v_mov_b32_e32 v3, v11 -; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc +; GFX6-NEXT: v_mov_b32_e32 v0, v6 +; GFX6-NEXT: v_mov_b32_e32 
v1, v7 +; GFX6-NEXT: v_mov_b32_e32 v2, v8 +; GFX6-NEXT: v_mov_b32_e32 v3, v9 +; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX6-NEXT: v_mov_b32_e32 v9, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v8, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB40_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll index ac223fd..311faac 100644 --- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll +++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll @@ -4203,25 +4203,25 @@ define double @global_agent_atomic_fmax_ret_f64__amdgpu_no_remote_memory(ptr add ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 -; GFX7-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3] +; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64 +; GFX7-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3] ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 -; GFX7-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11] -; GFX7-NEXT: v_max_f64 v[8:9], v[0:1], v[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v8 -; GFX7-NEXT: v_mov_b32_e32 v1, v9 -; GFX7-NEXT: v_mov_b32_e32 v2, v10 -; GFX7-NEXT: v_mov_b32_e32 v3, v11 +; GFX7-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9] +; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[10:11] +; GFX7-NEXT: v_mov_b32_e32 v0, v6 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 +; GFX7-NEXT: v_mov_b32_e32 v2, v8 +; GFX7-NEXT: v_mov_b32_e32 v3, v9 ; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], 
s[4:7], 0 addr64 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7-NEXT: v_mov_b32_e32 v9, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v8, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB24_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4237,26 +4237,25 @@ define double @global_agent_atomic_fmax_ret_f64__amdgpu_no_remote_memory(ptr add ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 -; GFX6-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3] +; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64 +; GFX6-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3] ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 -; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11] -; GFX6-NEXT: v_max_f64 v[8:9], v[0:1], v[6:7] -; GFX6-NEXT: v_mov_b32_e32 v0, v8 -; GFX6-NEXT: v_mov_b32_e32 v1, v9 -; GFX6-NEXT: v_mov_b32_e32 v2, v10 -; GFX6-NEXT: v_mov_b32_e32 v3, v11 +; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; GFX6-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9] +; GFX6-NEXT: v_max_f64 v[6:7], v[0:1], v[10:11] +; GFX6-NEXT: v_mov_b32_e32 v0, v6 +; GFX6-NEXT: v_mov_b32_e32 v1, v7 +; GFX6-NEXT: v_mov_b32_e32 v2, v8 +; GFX6-NEXT: v_mov_b32_e32 v3, v9 ; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX6-NEXT: v_mov_b32_e32 v9, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v8, v0 ; GFX6-NEXT: 
s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB24_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll index 5653f85..e2808ee 100644 --- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll +++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll @@ -4203,25 +4203,25 @@ define double @global_agent_atomic_fmin_ret_f64__amdgpu_no_remote_memory(ptr add ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 -; GFX7-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3] +; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64 +; GFX7-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3] ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 -; GFX7-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11] -; GFX7-NEXT: v_min_f64 v[8:9], v[0:1], v[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v8 -; GFX7-NEXT: v_mov_b32_e32 v1, v9 -; GFX7-NEXT: v_mov_b32_e32 v2, v10 -; GFX7-NEXT: v_mov_b32_e32 v3, v11 +; GFX7-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9] +; GFX7-NEXT: v_min_f64 v[6:7], v[0:1], v[10:11] +; GFX7-NEXT: v_mov_b32_e32 v0, v6 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 +; GFX7-NEXT: v_mov_b32_e32 v2, v8 +; GFX7-NEXT: v_mov_b32_e32 v3, v9 ; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7-NEXT: v_mov_b32_e32 v9, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v8, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB24_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4237,26 +4237,25 @@ 
define double @global_agent_atomic_fmin_ret_f64__amdgpu_no_remote_memory(ptr add ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 -; GFX6-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3] +; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64 +; GFX6-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3] ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 -; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11] -; GFX6-NEXT: v_min_f64 v[8:9], v[0:1], v[6:7] -; GFX6-NEXT: v_mov_b32_e32 v0, v8 -; GFX6-NEXT: v_mov_b32_e32 v1, v9 -; GFX6-NEXT: v_mov_b32_e32 v2, v10 -; GFX6-NEXT: v_mov_b32_e32 v3, v11 +; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; GFX6-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9] +; GFX6-NEXT: v_min_f64 v[6:7], v[0:1], v[10:11] +; GFX6-NEXT: v_mov_b32_e32 v0, v6 +; GFX6-NEXT: v_mov_b32_e32 v1, v7 +; GFX6-NEXT: v_mov_b32_e32 v2, v8 +; GFX6-NEXT: v_mov_b32_e32 v3, v9 ; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX6-NEXT: v_mov_b32_e32 v9, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v8, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB24_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll index f0e1615..11f0f38 100644 --- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll +++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll @@ -3913,15 +3913,13 @@ define double @global_agent_atomic_fsub_ret_f64(ptr 
addrspace(1) %ptr, double %v ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 ; GFX7-NEXT: v_mov_b32_e32 v4, v2 ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5] ; GFX7-NEXT: v_mov_b32_e32 v0, v8 ; GFX7-NEXT: v_mov_b32_e32 v1, v9 @@ -3931,7 +3929,9 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_mov_b32_e32 v11, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB16_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3947,15 +3947,13 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; GFX6-NEXT: v_mov_b32_e32 v5, v3 ; GFX6-NEXT: v_mov_b32_e32 v4, v2 ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v0, v8 @@ -3966,7 +3964,9 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v ; GFX6-NEXT: s_waitcnt 
vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 ; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_mov_b32_e32 v11, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB16_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4165,15 +4165,13 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1) ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040 +; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 ; GFX7-NEXT: v_mov_b32_e32 v4, v2 ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5] ; GFX7-NEXT: v_mov_b32_e32 v0, v8 ; GFX7-NEXT: v_mov_b32_e32 v1, v9 @@ -4183,7 +4181,9 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1) ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_mov_b32_e32 v11, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB17_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4199,15 +4199,13 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1) ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040 +; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040 ; GFX6-NEXT: v_mov_b32_e32 v5, v3 ; GFX6-NEXT: v_mov_b32_e32 v4, v2 ; GFX6-NEXT: s_mov_b64 
s[8:9], 0 ; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v0, v8 @@ -4218,7 +4216,9 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1) ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 ; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_mov_b32_e32 v11, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB17_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4413,34 +4413,32 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_neg(ptr addrspace(1) ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: s_movk_i32 s4, 0xf800 -; GFX7-NEXT: v_mov_b32_e32 v7, v1 -; GFX7-NEXT: v_mov_b32_e32 v6, v0 ; GFX7-NEXT: s_mov_b32 s5, -1 ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s6, 0 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 -; GFX7-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6 +; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64 +; GFX7-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 ; GFX7-NEXT: v_mov_b32_e32 v4, v2 -; GFX7-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc +; GFX7-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 ; GFX7-NEXT: .LBB18_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 -; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v8 -; GFX7-NEXT: v_mov_b32_e32 v1, v9 -; GFX7-NEXT: v_mov_b32_e32 v2, v10 -; 
GFX7-NEXT: v_mov_b32_e32 v3, v11 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc +; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], -v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v0, v6 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 +; GFX7-NEXT: v_mov_b32_e32 v2, v8 +; GFX7-NEXT: v_mov_b32_e32 v3, v9 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7-NEXT: v_mov_b32_e32 v9, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v8, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB18_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4451,35 +4449,33 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_neg(ptr addrspace(1) ; GFX6: ; %bb.0: ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX6-NEXT: s_movk_i32 s4, 0xf800 -; GFX6-NEXT: v_mov_b32_e32 v7, v1 -; GFX6-NEXT: v_mov_b32_e32 v6, v0 ; GFX6-NEXT: s_mov_b32 s5, -1 ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s6, 0 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 -; GFX6-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6 +; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64 +; GFX6-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0 ; GFX6-NEXT: v_mov_b32_e32 v5, v3 ; GFX6-NEXT: v_mov_b32_e32 v4, v2 -; GFX6-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc +; GFX6-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 ; GFX6-NEXT: .LBB18_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 -; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5] +; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], -v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 
v0, v8 -; GFX6-NEXT: v_mov_b32_e32 v1, v9 -; GFX6-NEXT: v_mov_b32_e32 v2, v10 -; GFX6-NEXT: v_mov_b32_e32 v3, v11 -; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc +; GFX6-NEXT: v_mov_b32_e32 v0, v6 +; GFX6-NEXT: v_mov_b32_e32 v1, v7 +; GFX6-NEXT: v_mov_b32_e32 v2, v8 +; GFX6-NEXT: v_mov_b32_e32 v3, v9 +; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX6-NEXT: v_mov_b32_e32 v9, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v8, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB18_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll index 74f0f64..6a4c284 100644 --- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll +++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll @@ -1502,13 +1502,11 @@ define i64 @global_atomic_sub_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB32_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_sub_i32_e32 v8, vcc, v10, v7 ; SI-NEXT: v_subb_u32_e32 v9, vcc, v11, v6, vcc ; SI-NEXT: s_waitcnt expcnt(0) @@ -1521,6 +1519,8 @@ define i64 @global_atomic_sub_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: 
v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB32_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1593,13 +1593,11 @@ define i64 @global_atomic_sub_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB33_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_sub_i32_e32 v8, vcc, v10, v7 ; SI-NEXT: v_subb_u32_e32 v9, vcc, v11, v6, vcc ; SI-NEXT: s_waitcnt expcnt(0) @@ -1612,6 +1610,8 @@ define i64 @global_atomic_sub_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB33_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1883,43 +1883,42 @@ define amdgpu_gfx i64 @global_atomic_sub_i64_ret_scalar(ptr addrspace(1) inreg % ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v9, s6, 0 -; SI-NEXT: v_writelane_b32 v9, s7, 1 +; SI-NEXT: v_writelane_b32 v7, s6, 0 +; SI-NEXT: v_writelane_b32 v7, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: 
buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 +; SI-NEXT: v_mov_b32_e32 v6, s35 ; SI-NEXT: .LBB36_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v8, v1 -; SI-NEXT: v_mov_b32_e32 v7, v0 -; SI-NEXT: v_subrev_i32_e32 v5, vcc, s34, v7 -; SI-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v5 -; SI-NEXT: v_mov_b32_e32 v1, v6 -; SI-NEXT: v_mov_b32_e32 v2, v7 -; SI-NEXT: v_mov_b32_e32 v3, v8 +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; SI-NEXT: v_subrev_i32_e32 v2, vcc, s34, v4 +; SI-NEXT: v_subb_u32_e32 v3, vcc, v5, v6, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB36_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v9, 1 -; SI-NEXT: v_readlane_b32 s6, v9, 0 +; SI-NEXT: v_readlane_b32 s7, v7, 1 +; SI-NEXT: v_readlane_b32 s6, v7, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -1985,43 +1984,42 @@ define amdgpu_gfx i64 @global_atomic_sub_i64_ret_offset_scalar(ptr addrspace(1) ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v9, s6, 0 -; SI-NEXT: v_writelane_b32 v9, s7, 1 +; SI-NEXT: v_writelane_b32 v7, s6, 0 +; SI-NEXT: v_writelane_b32 v7, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 +; SI-NEXT: v_mov_b32_e32 v6, s35 ; SI-NEXT: .LBB37_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v8, v1 -; SI-NEXT: v_mov_b32_e32 v7, v0 -; SI-NEXT: v_subrev_i32_e32 v5, vcc, s34, v7 -; SI-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v5 -; SI-NEXT: v_mov_b32_e32 v1, v6 -; SI-NEXT: v_mov_b32_e32 v2, v7 -; SI-NEXT: v_mov_b32_e32 v3, v8 +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; SI-NEXT: v_subrev_i32_e32 v2, vcc, s34, v4 +; SI-NEXT: v_subb_u32_e32 v3, vcc, v5, v6, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB37_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v9, 1 -; SI-NEXT: v_readlane_b32 s6, v9, 0 +; 
SI-NEXT: v_readlane_b32 s7, v7, 1 +; SI-NEXT: v_readlane_b32 s6, v7, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -2342,13 +2340,11 @@ define i64 @global_atomic_and_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB42_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_and_b32_e32 v9, v11, v6 ; SI-NEXT: v_and_b32_e32 v8, v10, v7 ; SI-NEXT: s_waitcnt expcnt(0) @@ -2361,6 +2357,8 @@ define i64 @global_atomic_and_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB42_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2433,13 +2431,11 @@ define i64 @global_atomic_and_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB43_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_and_b32_e32 v9, v11, v6 ; 
SI-NEXT: v_and_b32_e32 v8, v10, v7 ; SI-NEXT: s_waitcnt expcnt(0) @@ -2452,6 +2448,8 @@ define i64 @global_atomic_and_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB43_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2726,14 +2724,11 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_scalar(ptr addrspace(1) inreg % ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB46_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_and_b32_e32 v3, s34, v5 ; SI-NEXT: v_and_b32_e32 v2, s35, v4 ; SI-NEXT: v_mov_b32_e32 v0, v2 @@ -2745,6 +2740,8 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_scalar(ptr addrspace(1) inreg % ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB46_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2825,14 +2822,11 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_offset_scalar(ptr addrspace(1) ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB47_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop 
Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_and_b32_e32 v3, s34, v5 ; SI-NEXT: v_and_b32_e32 v2, s35, v4 ; SI-NEXT: v_mov_b32_e32 v0, v2 @@ -2844,6 +2838,8 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_offset_scalar(ptr addrspace(1) ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB47_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3182,14 +3178,11 @@ define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB52_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_and_b32_e32 v0, v11, v6 ; SI-NEXT: v_and_b32_e32 v1, v10, v7 ; SI-NEXT: v_not_b32_e32 v9, v0 @@ -3203,6 +3196,8 @@ define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB52_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3279,14 +3274,11 @@ define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; 
SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB53_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_and_b32_e32 v0, v11, v6 ; SI-NEXT: v_and_b32_e32 v1, v10, v7 ; SI-NEXT: v_not_b32_e32 v9, v0 @@ -3300,6 +3292,8 @@ define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB53_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3590,14 +3584,11 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_scalar(ptr addrspace(1) inreg ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB56_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_and_b32_e32 v0, s34, v5 ; SI-NEXT: v_and_b32_e32 v1, s35, v4 ; SI-NEXT: v_not_b32_e32 v3, v0 @@ -3611,6 +3602,8 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_scalar(ptr addrspace(1) inreg ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: 
s_cbranch_execnz .LBB56_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3695,14 +3688,11 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_offset_scalar(ptr addrspace(1) ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB57_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_and_b32_e32 v0, s34, v5 ; SI-NEXT: v_and_b32_e32 v1, s35, v4 ; SI-NEXT: v_not_b32_e32 v3, v0 @@ -3716,6 +3706,8 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_offset_scalar(ptr addrspace(1) ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB57_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3891,14 +3883,11 @@ define i64 @global_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory(ptr addrs ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB59_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_and_b32_e32 v0, v11, v6 ; SI-NEXT: v_and_b32_e32 v1, v10, v7 ; SI-NEXT: v_not_b32_e32 v9, v0 @@ -3912,6 +3901,8 @@ define i64 
@global_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory(ptr addrs ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB59_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4162,13 +4153,11 @@ define i64 @global_atomic_or_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB62_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_or_b32_e32 v9, v11, v6 ; SI-NEXT: v_or_b32_e32 v8, v10, v7 ; SI-NEXT: s_waitcnt expcnt(0) @@ -4181,6 +4170,8 @@ define i64 @global_atomic_or_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB62_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4253,13 +4244,11 @@ define i64 @global_atomic_or_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB63_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_or_b32_e32 v9, v11, v6 ; 
SI-NEXT: v_or_b32_e32 v8, v10, v7 ; SI-NEXT: s_waitcnt expcnt(0) @@ -4272,6 +4261,8 @@ define i64 @global_atomic_or_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB63_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4546,14 +4537,11 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_scalar(ptr addrspace(1) inreg %p ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB66_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_or_b32_e32 v3, s34, v5 ; SI-NEXT: v_or_b32_e32 v2, s35, v4 ; SI-NEXT: v_mov_b32_e32 v0, v2 @@ -4565,6 +4553,8 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_scalar(ptr addrspace(1) inreg %p ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB66_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4645,14 +4635,11 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_offset_scalar(ptr addrspace(1) i ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB67_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop 
Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_or_b32_e32 v3, s34, v5 ; SI-NEXT: v_or_b32_e32 v2, s35, v4 ; SI-NEXT: v_mov_b32_e32 v0, v2 @@ -4664,6 +4651,8 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_offset_scalar(ptr addrspace(1) i ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB67_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4990,13 +4979,11 @@ define i64 @global_atomic_xor_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB72_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_xor_b32_e32 v9, v11, v6 ; SI-NEXT: v_xor_b32_e32 v8, v10, v7 ; SI-NEXT: s_waitcnt expcnt(0) @@ -5009,6 +4996,8 @@ define i64 @global_atomic_xor_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB72_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5081,13 +5070,11 @@ define i64 @global_atomic_xor_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32 +; 
SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB73_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_xor_b32_e32 v9, v11, v6 ; SI-NEXT: v_xor_b32_e32 v8, v10, v7 ; SI-NEXT: s_waitcnt expcnt(0) @@ -5100,6 +5087,8 @@ define i64 @global_atomic_xor_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB73_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5374,14 +5363,11 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_scalar(ptr addrspace(1) inreg % ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB76_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_xor_b32_e32 v3, s34, v5 ; SI-NEXT: v_xor_b32_e32 v2, s35, v4 ; SI-NEXT: v_mov_b32_e32 v0, v2 @@ -5393,6 +5379,8 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_scalar(ptr addrspace(1) inreg % ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB76_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5473,14 +5461,11 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_offset_scalar(ptr 
addrspace(1) ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB77_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_xor_b32_e32 v3, s34, v5 ; SI-NEXT: v_xor_b32_e32 v2, s35, v4 ; SI-NEXT: v_mov_b32_e32 v0, v2 @@ -5492,6 +5477,8 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_offset_scalar(ptr addrspace(1) ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB77_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5824,13 +5811,11 @@ define i64 @global_atomic_max_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB82_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -5844,6 +5829,8 @@ define i64 @global_atomic_max_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: 
s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB82_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5918,13 +5905,11 @@ define i64 @global_atomic_max_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB83_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -5938,6 +5923,8 @@ define i64 @global_atomic_max_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB83_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6223,45 +6210,45 @@ define amdgpu_gfx i64 @global_atomic_max_i64_ret_scalar(ptr addrspace(1) inreg % ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 
v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB86_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB86_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -6331,45 +6318,45 @@ define amdgpu_gfx i64 @global_atomic_max_i64_ret_offset_scalar(ptr addrspace(1) ; 
SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB87_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; 
SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB87_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -7176,13 +7163,11 @@ define i64 @global_atomic_umax_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB96_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -7196,6 +7181,8 @@ define i64 @global_atomic_umax_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB96_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7270,13 +7257,11 @@ define i64 @global_atomic_umax_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], 
s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB97_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -7290,6 +7275,8 @@ define i64 @global_atomic_umax_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB97_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7575,45 +7562,45 @@ define amdgpu_gfx i64 @global_atomic_umax_i64_ret_scalar(ptr addrspace(1) inreg ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB100_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], 
v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB100_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -7683,45 +7670,45 @@ define amdgpu_gfx i64 @global_atomic_umax_i64_ret_offset_scalar(ptr addrspace(1) ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: 
v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB101_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB101_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte 
Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -8416,13 +8403,11 @@ define i64 @global_atomic_umin_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB109_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_le_u64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -8436,6 +8421,8 @@ define i64 @global_atomic_umin_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB109_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8510,13 +8497,11 @@ define i64 @global_atomic_umin_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB110_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_le_u64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -8530,6 +8515,8 @@ define 
i64 @global_atomic_umin_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB110_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8815,45 +8802,45 @@ define amdgpu_gfx i64 @global_atomic_umin_i64_ret_scalar(ptr addrspace(1) inreg ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB113_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: 
v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB113_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -8923,45 +8910,45 @@ define amdgpu_gfx i64 @global_atomic_umin_i64_ret_offset_scalar(ptr addrspace(1) ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; 
SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB114_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB114_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -9292,13 +9279,11 @@ define i64 @global_atomic_min_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 
v[0:1], v[6:7], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB119_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_le_i64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -9312,6 +9297,8 @@ define i64 @global_atomic_min_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB119_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -9386,13 +9373,11 @@ define i64 @global_atomic_min_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB120_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_le_i64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -9406,6 +9391,8 @@ define i64 @global_atomic_min_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB120_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -9691,45 +9678,45 @@ define amdgpu_gfx 
i64 @global_atomic_min_i64_ret_scalar(ptr addrspace(1) inreg % ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB123_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 
+; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB123_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -9799,45 +9786,45 @@ define amdgpu_gfx i64 @global_atomic_min_i64_ret_offset_scalar(ptr addrspace(1) ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB124_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_ge_i64_e32 
vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB124_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -10645,14 +10632,11 @@ define i64 @global_atomic_uinc_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB133_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, 
vcc, 1, v10 ; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v11, vcc ; SI-NEXT: v_cmp_lt_u64_e32 vcc, v[10:11], v[4:5] @@ -10667,6 +10651,8 @@ define i64 @global_atomic_uinc_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB133_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -10745,14 +10731,11 @@ define i64 @global_atomic_uinc_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB134_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v10 ; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v11, vcc ; SI-NEXT: v_cmp_lt_u64_e32 vcc, v[10:11], v[4:5] @@ -10767,6 +10750,8 @@ define i64 @global_atomic_uinc_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB134_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -11065,14 +11050,11 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_scalar(ptr addrspace(1) i ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 
; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB137_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v4 ; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc ; SI-NEXT: v_cmp_gt_u64_e32 vcc, s[34:35], v[4:5] @@ -11087,6 +11069,8 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_scalar(ptr addrspace(1) i ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB137_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -11173,14 +11157,11 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_offset_scalar(ptr addrspa ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB138_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v4 ; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc ; SI-NEXT: v_cmp_gt_u64_e32 vcc, s[34:35], v[4:5] @@ -11195,6 +11176,8 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_offset_scalar(ptr addrspa ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB138_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ 
-11557,14 +11540,11 @@ define i64 @global_atomic_udec_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s11, 0xf000 ; SI-NEXT: s_mov_b32 s8, s10 ; SI-NEXT: s_mov_b32 s9, s10 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[8:11], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[8:11], 0 addr64 ; SI-NEXT: s_mov_b64 s[6:7], 0 ; SI-NEXT: .LBB143_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v10 ; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v11, vcc ; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11] @@ -11581,6 +11561,8 @@ define i64 @global_atomic_udec_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[6:7] ; SI-NEXT: s_cbranch_execnz .LBB143_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -11663,14 +11645,11 @@ define i64 @global_atomic_udec_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i ; SI-NEXT: s_mov_b32 s11, 0xf000 ; SI-NEXT: s_mov_b32 s8, s10 ; SI-NEXT: s_mov_b32 s9, s10 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[8:11], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[8:11], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[6:7], 0 ; SI-NEXT: .LBB144_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v10 ; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v11, vcc ; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11] @@ -11687,6 +11666,8 @@ define i64 
@global_atomic_udec_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[6:7] ; SI-NEXT: s_cbranch_execnz .LBB144_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -12004,49 +11985,48 @@ define amdgpu_gfx i64 @global_atomic_udec_wrap_i64_ret_scalar(ptr addrspace(1) i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[38:39], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB147_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v8 -; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v9, vcc -; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9] -; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[8:9] +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4 +; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v5, vcc +; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5] +; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[4:5] ; 
SI-NEXT: s_or_b64 vcc, vcc, s[36:37] -; SI-NEXT: v_cndmask_b32_e32 v7, v1, v4, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v0, v5, vcc -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v1, v6, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v0, v7, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[38:39], vcc, s[38:39] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[38:39] ; SI-NEXT: s_cbranch_execnz .LBB147_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[38:39] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -12124,49 +12104,48 @@ define amdgpu_gfx i64 @global_atomic_udec_wrap_i64_ret_offset_scalar(ptr addrspa ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 
v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[38:39], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB148_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v8 -; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v9, vcc -; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9] -; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[8:9] +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4 +; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v5, vcc +; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5] +; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[4:5] ; SI-NEXT: s_or_b64 vcc, vcc, s[36:37] -; SI-NEXT: v_cndmask_b32_e32 v7, v1, v4, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v0, v5, vcc -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v1, v6, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v0, v7, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[38:39], vcc, s[38:39] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[38:39] ; SI-NEXT: s_cbranch_execnz .LBB148_1 ; SI-NEXT: ; %bb.2: ; 
%atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[38:39] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll index bc3d378..3aa3663 100644 --- a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll +++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll @@ -11,9 +11,9 @@ ; GCN-O0: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O0>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(atomic-expand,verify,gc-lowering,lower-constant-intrinsics,unreachableblockelim,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa,require<uniformity>,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,localstackalloc))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,phi-node-elimination,two-address-instruction,regallocfast,si-fix-vgpr-copies,remove-redundant-debug-values,fixup-s
tatepoint-caller-saved,prolog-epilog,post-ra-pseudos,si-post-ra-bundler,fentry-insert,xray-instrumentation,patchable-function,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) -; GCN-O2: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O2>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,sta
ck-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) +; GCN-O2: 
require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O2>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-eliminatio
n,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) -; GCN-O3: 
require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O3>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef
,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) +; GCN-O3: 
require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O3>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef
,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) define void @empty() { ret void diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll index 65d0102..6e52125 100644 --- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll +++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll @@ -232,15 +232,15 @@ ; GCN-O1-NEXT: AMDGPU Preload Kernel Arguments ; GCN-O1-NEXT: FunctionPass Manager ; GCN-O1-NEXT: AMDGPU Lower Kernel Arguments +; GCN-O1-NEXT: Dominator Tree Construction +; GCN-O1-NEXT: Natural Loop Information +; GCN-O1-NEXT: CodeGen Prepare ; GCN-O1-NEXT: Lower buffer fat pointer operations to buffer resources ; 
GCN-O1-NEXT: AMDGPU lower intrinsics ; GCN-O1-NEXT: CallGraph Construction ; GCN-O1-NEXT: Call Graph SCC Pass Manager ; GCN-O1-NEXT: DummyCGSCCPass ; GCN-O1-NEXT: FunctionPass Manager -; GCN-O1-NEXT: Dominator Tree Construction -; GCN-O1-NEXT: Natural Loop Information -; GCN-O1-NEXT: CodeGen Prepare ; GCN-O1-NEXT: Lazy Value Information Analysis ; GCN-O1-NEXT: Lower SwitchInst's to branches ; GCN-O1-NEXT: Lower invoke and unwind, for unwindless code generators @@ -533,21 +533,21 @@ ; GCN-O1-OPTS-NEXT: AMDGPU Preload Kernel Arguments ; GCN-O1-OPTS-NEXT: FunctionPass Manager ; GCN-O1-OPTS-NEXT: AMDGPU Lower Kernel Arguments +; GCN-O1-OPTS-NEXT: Dominator Tree Construction +; GCN-O1-OPTS-NEXT: Natural Loop Information +; GCN-O1-OPTS-NEXT: CodeGen Prepare +; GCN-O1-OPTS-NEXT: Dominator Tree Construction +; GCN-O1-OPTS-NEXT: Basic Alias Analysis (stateless AA impl) +; GCN-O1-OPTS-NEXT: Function Alias Analysis Results +; GCN-O1-OPTS-NEXT: Natural Loop Information +; GCN-O1-OPTS-NEXT: Scalar Evolution Analysis +; GCN-O1-OPTS-NEXT: GPU Load and Store Vectorizer ; GCN-O1-OPTS-NEXT: Lower buffer fat pointer operations to buffer resources ; GCN-O1-OPTS-NEXT: AMDGPU lower intrinsics ; GCN-O1-OPTS-NEXT: CallGraph Construction ; GCN-O1-OPTS-NEXT: Call Graph SCC Pass Manager ; GCN-O1-OPTS-NEXT: DummyCGSCCPass ; GCN-O1-OPTS-NEXT: FunctionPass Manager -; GCN-O1-OPTS-NEXT: Dominator Tree Construction -; GCN-O1-OPTS-NEXT: Natural Loop Information -; GCN-O1-OPTS-NEXT: CodeGen Prepare -; GCN-O1-OPTS-NEXT: Dominator Tree Construction -; GCN-O1-OPTS-NEXT: Basic Alias Analysis (stateless AA impl) -; GCN-O1-OPTS-NEXT: Function Alias Analysis Results -; GCN-O1-OPTS-NEXT: Natural Loop Information -; GCN-O1-OPTS-NEXT: Scalar Evolution Analysis -; GCN-O1-OPTS-NEXT: GPU Load and Store Vectorizer ; GCN-O1-OPTS-NEXT: Lazy Value Information Analysis ; GCN-O1-OPTS-NEXT: Lower SwitchInst's to branches ; GCN-O1-OPTS-NEXT: Lower invoke and unwind, for unwindless code generators @@ -852,21 +852,21 @@ ; 
GCN-O2-NEXT: AMDGPU Preload Kernel Arguments ; GCN-O2-NEXT: FunctionPass Manager ; GCN-O2-NEXT: AMDGPU Lower Kernel Arguments +; GCN-O2-NEXT: Dominator Tree Construction +; GCN-O2-NEXT: Natural Loop Information +; GCN-O2-NEXT: CodeGen Prepare +; GCN-O2-NEXT: Dominator Tree Construction +; GCN-O2-NEXT: Basic Alias Analysis (stateless AA impl) +; GCN-O2-NEXT: Function Alias Analysis Results +; GCN-O2-NEXT: Natural Loop Information +; GCN-O2-NEXT: Scalar Evolution Analysis +; GCN-O2-NEXT: GPU Load and Store Vectorizer ; GCN-O2-NEXT: Lower buffer fat pointer operations to buffer resources ; GCN-O2-NEXT: AMDGPU lower intrinsics ; GCN-O2-NEXT: CallGraph Construction ; GCN-O2-NEXT: Call Graph SCC Pass Manager ; GCN-O2-NEXT: DummyCGSCCPass ; GCN-O2-NEXT: FunctionPass Manager -; GCN-O2-NEXT: Dominator Tree Construction -; GCN-O2-NEXT: Natural Loop Information -; GCN-O2-NEXT: CodeGen Prepare -; GCN-O2-NEXT: Dominator Tree Construction -; GCN-O2-NEXT: Basic Alias Analysis (stateless AA impl) -; GCN-O2-NEXT: Function Alias Analysis Results -; GCN-O2-NEXT: Natural Loop Information -; GCN-O2-NEXT: Scalar Evolution Analysis -; GCN-O2-NEXT: GPU Load and Store Vectorizer ; GCN-O2-NEXT: Lazy Value Information Analysis ; GCN-O2-NEXT: Lower SwitchInst's to branches ; GCN-O2-NEXT: Lower invoke and unwind, for unwindless code generators @@ -1186,21 +1186,21 @@ ; GCN-O3-NEXT: AMDGPU Preload Kernel Arguments ; GCN-O3-NEXT: FunctionPass Manager ; GCN-O3-NEXT: AMDGPU Lower Kernel Arguments +; GCN-O3-NEXT: Dominator Tree Construction +; GCN-O3-NEXT: Natural Loop Information +; GCN-O3-NEXT: CodeGen Prepare +; GCN-O3-NEXT: Dominator Tree Construction +; GCN-O3-NEXT: Basic Alias Analysis (stateless AA impl) +; GCN-O3-NEXT: Function Alias Analysis Results +; GCN-O3-NEXT: Natural Loop Information +; GCN-O3-NEXT: Scalar Evolution Analysis +; GCN-O3-NEXT: GPU Load and Store Vectorizer ; GCN-O3-NEXT: Lower buffer fat pointer operations to buffer resources ; GCN-O3-NEXT: AMDGPU lower intrinsics ; 
GCN-O3-NEXT: CallGraph Construction ; GCN-O3-NEXT: Call Graph SCC Pass Manager ; GCN-O3-NEXT: DummyCGSCCPass ; GCN-O3-NEXT: FunctionPass Manager -; GCN-O3-NEXT: Dominator Tree Construction -; GCN-O3-NEXT: Natural Loop Information -; GCN-O3-NEXT: CodeGen Prepare -; GCN-O3-NEXT: Dominator Tree Construction -; GCN-O3-NEXT: Basic Alias Analysis (stateless AA impl) -; GCN-O3-NEXT: Function Alias Analysis Results -; GCN-O3-NEXT: Natural Loop Information -; GCN-O3-NEXT: Scalar Evolution Analysis -; GCN-O3-NEXT: GPU Load and Store Vectorizer ; GCN-O3-NEXT: Lazy Value Information Analysis ; GCN-O3-NEXT: Lower SwitchInst's to branches ; GCN-O3-NEXT: Lower invoke and unwind, for unwindless code generators diff --git a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll index ba5ce8b..8bb7274 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll @@ -76,13 +76,12 @@ define amdgpu_kernel void @v_round_f64(ptr addrspace(1) %out, ptr addrspace(1) % ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: s_mov_b64 s[4:5], s[2:3] ; SI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64 -; SI-NEXT: s_movk_i32 s4, 0xfc01 ; SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: s_mov_b32 s3, 0xfffff ; SI-NEXT: v_mov_b32_e32 v8, 0x3ff00000 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_bfe_u32 v4, v3, 20, 11 -; SI-NEXT: v_add_i32_e32 v6, vcc, s4, v4 +; SI-NEXT: v_add_i32_e32 v6, vcc, 0xfffffc01, v4 ; SI-NEXT: v_lshr_b64 v[4:5], s[2:3], v6 ; SI-NEXT: v_and_b32_e32 v7, 0x80000000, v3 ; SI-NEXT: v_bfi_b32 v5, v5, 0, v3 diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll index c92c672..ca4f5d2 100644 --- a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll +++ b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll @@ -51,7 +51,7 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) { ; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, s4, v2 ; CHECK-NEXT: 
v_addc_co_u32_e32 v7, vcc, v3, v7, vcc ; CHECK-NEXT: s_add_u32 s4, s4, 1 -; CHECK-NEXT: s_addc_u32 s5, s5, 0 +; CHECK-NEXT: s_addc_u32 s5, 0, s5 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; CHECK-NEXT: flat_store_byte v[6:7], v10 ; CHECK-NEXT: ; %bb.7: diff --git a/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir b/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir index f1f2eb6..c9645c3 100644 --- a/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir +++ b/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir @@ -80,3 +80,151 @@ body: | %4:vreg_128 = REG_SEQUENCE %3.sub0, %subreg.sub0, %3.sub1, %subreg.sub1, %3.sub2, %subreg.sub2, %3.sub3, %subreg.sub3 KILL implicit %4 ... + +--- +name: copy_vreg_64_subreg_from_vgpr_reg_sequence +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence + ; GCN: liveins: $vgpr0, $vgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GCN-NEXT: $vgpr0 = COPY [[COPY2]] + %0:vgpr_32 = COPY $vgpr0 + %1:vgpr_32 = COPY $vgpr1 + %2:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1 + %3:vgpr_32 = COPY %2.sub0 + $vgpr0 = COPY %3 +... 
+ +--- +name: copy_vreg_64_subreg_from_vgpr_reg_sequence_extra_copy +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence_extra_copy + ; GCN: liveins: $vgpr0, $vgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] + ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 + ; GCN-NEXT: $vgpr0 = COPY [[COPY3]] + %0:vgpr_32 = COPY $vgpr0 + %1:vgpr_32 = COPY $vgpr1 + %2:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1 + %3:vreg_64 = COPY %2 + %4:vgpr_32 = COPY %3.sub0 + $vgpr0 = COPY %4 +... + +--- +name: copy_av_64_subreg_from_vgpr_reg_sequence +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; GCN-LABEL: name: copy_av_64_subreg_from_vgpr_reg_sequence + ; GCN: liveins: $vgpr0, $vgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GCN-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE]] + ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 + ; GCN-NEXT: $vgpr0 = COPY [[COPY3]] + %0:vgpr_32 = COPY $vgpr0 + %1:vgpr_32 = COPY $vgpr1 + %2:vreg_64_align2 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1 + %3:av_64_align2 = COPY %2 + %4:vgpr_32 = COPY %3.sub0 + $vgpr0 = COPY %4 +... 
+ +--- +name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub0_compose +body: | + bb.0: + liveins: $vgpr0_vgpr1 + ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub0_compose + ; GCN: liveins: $vgpr0_vgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]].sub0, %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0 + ; GCN-NEXT: $vgpr0 = COPY [[COPY2]] + %0:vreg_64 = COPY $vgpr0_vgpr1 + %1:vgpr_32 = COPY $vgpr1 + %2:vreg_64 = REG_SEQUENCE %0.sub0, %subreg.sub0, %1, %subreg.sub1 + %3:vgpr_32 = COPY %2.sub0 + $vgpr0 = COPY %3 +... + +--- +name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub1_compose +body: | + bb.0: + liveins: $vgpr0_vgpr1 + ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub1_compose + ; GCN: liveins: $vgpr0_vgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]].sub1, %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub1 + ; GCN-NEXT: $vgpr0 = COPY [[COPY2]] + %0:vreg_64 = COPY $vgpr0_vgpr1 + %1:vgpr_32 = COPY $vgpr1 + %2:vreg_64 = REG_SEQUENCE %0.sub1, %subreg.sub0, %1, %subreg.sub1 + %3:vgpr_32 = COPY %2.sub0 + $vgpr0 = COPY %3 +... 
+ +--- +name: copy_vreg_64_subreg_from_multiple_vgpr_reg_sequence +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GCN-LABEL: name: copy_vreg_64_subreg_from_multiple_vgpr_reg_sequence + ; GCN: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] + ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub1_sub2 + ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] + ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0 + ; GCN-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 + ; GCN-NEXT: $vgpr0_vgpr1 = COPY [[COPY4]] + ; GCN-NEXT: $vgpr2_vgpr3 = COPY [[COPY5]] + ; GCN-NEXT: $vgpr4_vgpr5 = COPY [[COPY6]] + ; GCN-NEXT: $vgpr6 = COPY [[COPY7]] + ; GCN-NEXT: $vgpr6 = COPY [[COPY8]] + %0:vgpr_32 = COPY $vgpr0 + %1:vgpr_32 = COPY $vgpr1 + %2:vgpr_32 = COPY $vgpr2 + %3:vgpr_32 = COPY $vgpr3 + %4:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1 + %5:vreg_64 = REG_SEQUENCE %2, %subreg.sub0, %3, %subreg.sub1 + %6:vreg_128 = REG_SEQUENCE %4, %subreg.sub0_sub1, %5, %subreg.sub2_sub3 + %7:vreg_64 = COPY %6.sub0_sub1 + %8:vreg_64 = COPY %6.sub1_sub2 + %9:vreg_64 = COPY %6.sub2_sub3 + %10:vgpr_32 = COPY %6.sub2 + %11:vgpr_32 = COPY %6.sub0 + $vgpr0_vgpr1 = COPY %7 + $vgpr2_vgpr3 = COPY %8 + $vgpr4_vgpr5 = COPY %9 + $vgpr6 = COPY %10 + $vgpr6 = COPY %11 +... 
diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll index aa131ed..760a298 100644 --- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll +++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll @@ -495,8 +495,7 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX900-NEXT: v_mov_b32_e32 v1, s35 ; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s34, v0 ; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc -; GFX900-NEXT: s_movk_i32 s0, 0x5000 -; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 +; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, 0x5000, v0 ; GFX900-NEXT: v_mov_b32_e32 v4, 0 ; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; GFX900-NEXT: v_mov_b32_e32 v5, 0 @@ -718,8 +717,7 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX90A-NEXT: v_mov_b32_e32 v2, s35 ; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, s34, v1 ; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v2, vcc -; GFX90A-NEXT: s_movk_i32 s0, 0x5000 -; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, s0, v1 +; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, 0x5000, v1 ; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], 0, 0 ; GFX90A-NEXT: v_mov_b32_e32 v1, 0x7f diff --git a/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll b/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll index 5d5aad7..566eb1e 100644 --- a/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll +++ b/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll @@ -7,16 +7,12 @@ @gv.fptr0 = external hidden unnamed_addr addrspace(4) constant ptr, align 4 -; GCN-LABEL: unreachable: -; Function info: -; codeLenInByte = 4 define internal fastcc void @unreachable() { %fptr = load ptr, ptr addrspace(4) @gv.fptr0 call void %fptr() unreachable } - ; GCN-LABEL: entry: ; GCN-NOT: s_swappc_b64 ; GCN: s_endpgm diff --git a/llvm/test/CodeGen/ARM/llvm.exp10.ll b/llvm/test/CodeGen/ARM/llvm.exp10.ll index 
eb72fe8..49397ca 100644 --- a/llvm/test/CodeGen/ARM/llvm.exp10.ll +++ b/llvm/test/CodeGen/ARM/llvm.exp10.ll @@ -189,12 +189,13 @@ define <3 x float> @exp10_v3f32(<3 x float> %x) { ; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: mov r0, r4 ; CHECK-NEXT: bl exp10f +; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: vmov s17, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: bl exp10f ; CHECK-NEXT: vmov s16, r0 +; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: vmov s18, r6 -; CHECK-NEXT: vmov r0, r1, d8 ; CHECK-NEXT: vmov r2, r3, d9 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: pop {r4, r5, r6, pc} @@ -207,7 +208,6 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) { ; CHECK: @ %bb.0: ; CHECK-NEXT: push {r4, r5, r6, r7, lr} ; CHECK-NEXT: sub sp, #4 -; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: mov r0, r1 ; CHECK-NEXT: mov r4, r3 @@ -216,17 +216,15 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) { ; CHECK-NEXT: mov r7, r0 ; CHECK-NEXT: mov r0, r4 ; CHECK-NEXT: bl exp10f -; CHECK-NEXT: vmov s19, r0 +; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: bl exp10f -; CHECK-NEXT: vmov s18, r0 +; CHECK-NEXT: mov r5, r0 ; CHECK-NEXT: mov r0, r6 -; CHECK-NEXT: vmov s17, r7 ; CHECK-NEXT: bl exp10f -; CHECK-NEXT: vmov s16, r0 -; CHECK-NEXT: vmov r2, r3, d9 -; CHECK-NEXT: vmov r0, r1, d8 -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: mov r1, r7 +; CHECK-NEXT: mov r2, r5 +; CHECK-NEXT: mov r3, r4 ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop {r4, r5, r6, r7, pc} %r = call <4 x float> @llvm.exp10.v4f32(<4 x float> %x) diff --git a/llvm/test/CodeGen/ARM/llvm.frexp.ll b/llvm/test/CodeGen/ARM/llvm.frexp.ll index 376426d..80972b75 100644 --- a/llvm/test/CodeGen/ARM/llvm.frexp.ll +++ b/llvm/test/CodeGen/ARM/llvm.frexp.ll @@ -362,33 +362,31 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) { define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) { ; CHECK-LABEL: test_frexp_v4f32_v4i32_only_use_fract: ; CHECK: @ %bb.0: -; CHECK-NEXT: push {r4, r5, r6, lr} 
-; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: sub sp, #16 -; CHECK-NEXT: mov r5, r1 -; CHECK-NEXT: mov r6, r0 -; CHECK-NEXT: mov r1, sp -; CHECK-NEXT: mov r0, r3 -; CHECK-NEXT: mov r4, r2 -; CHECK-NEXT: bl frexpf +; CHECK-NEXT: push {r4, r5, r6, r7, lr} +; CHECK-NEXT: sub sp, #20 +; CHECK-NEXT: mov r6, r1 ; CHECK-NEXT: add r1, sp, #4 -; CHECK-NEXT: vmov s19, r0 -; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: mov r7, r0 +; CHECK-NEXT: mov r0, r3 +; CHECK-NEXT: mov r5, r2 ; CHECK-NEXT: bl frexpf ; CHECK-NEXT: add r1, sp, #8 -; CHECK-NEXT: vmov s18, r0 +; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: bl frexpf ; CHECK-NEXT: add r1, sp, #12 -; CHECK-NEXT: vmov s17, r0 +; CHECK-NEXT: mov r5, r0 ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: bl frexpf -; CHECK-NEXT: vmov s16, r0 -; CHECK-NEXT: vmov r2, r3, d9 -; CHECK-NEXT: vmov r0, r1, d8 -; CHECK-NEXT: add sp, #16 -; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: pop {r4, r5, r6, pc} +; CHECK-NEXT: add r1, sp, #16 +; CHECK-NEXT: mov r6, r0 +; CHECK-NEXT: mov r0, r7 +; CHECK-NEXT: bl frexpf +; CHECK-NEXT: mov r1, r6 +; CHECK-NEXT: mov r2, r5 +; CHECK-NEXT: mov r3, r4 +; CHECK-NEXT: add sp, #20 +; CHECK-NEXT: pop {r4, r5, r6, r7, pc} %result = call { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float> %a) %result.0 = extractvalue { <4 x float>, <4 x i32> } %result, 0 ret <4 x float> %result.0 diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll index 288dea0..b043ea1 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 
} ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 666, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 666, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll index e9abcf9..8219ffd 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 666, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 666, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll index 238f488..31d8dd1 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; 
function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 666, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 666, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll index 8dc69eb..2bb4af5 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 666, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 666, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll index b2c8faf..62fda73 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; 
list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 666, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 666, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll index 758d262..7e8de14 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 45, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 45, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Flag.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Flag.ll new file mode 100644 index 0000000..8f7ef88 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Flag.ll @@ -0,0 +1,19 @@ +; RUN: not opt -passes='print<dxil-root-signature>' %s -S -o - 2>&1 | FileCheck %s + + +target triple = "dxil-unknown-shadermodel6.0-compute" + +; CHECK: 
error: Invalid value for Static Sampler Flag: 4 +; CHECK-NOT: Root Signature Definitions + +define void @main() #0 { +entry: + ret void +} +attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + + +!dx.rootsignatures = !{!2} ; list of function/root signature pairs +!2 = !{ ptr @main, !3, i32 3 } ; function, root signature +!3 = !{ !5 } ; list of root signature elements +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 4 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll index 47d4b52..312e769 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 666, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 666, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll index 855e0c0..80fd208 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll +++ 
b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 0x7FF8000000000000, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 0x7FF8000000000000, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll index 812749b..5daaf69 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float 0x7FF8000000000000, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float 0x7FF8000000000000, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll index 6898aec..423987b 100644 --- 
a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 6.660000e+02, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 6.660000e+02, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll index dc6ee42..af630dc 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 4294967280, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 4294967280, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll 
b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll index 6cee1dd9..bd752f0 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 4294967295, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 4294967295, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll index fa5bf12..ca0c02d 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 666 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 666, i32 0 } diff 
--git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll index 1dd470d..77c5c7a 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll @@ -15,7 +15,7 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } ; DXC: - Name: RTS0 ; DXC-NEXT: Size: 76 diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers_V3.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers_V3.ll new file mode 100644 index 0000000..7e56f04 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers_V3.ll @@ -0,0 +1,42 @@ +; RUN: opt %s -dxil-embed -dxil-globals -S -o - | FileCheck %s +; RUN: llc %s --filetype=obj -o - | obj2yaml | FileCheck %s --check-prefix=DXC + +target triple = "dxil-unknown-shadermodel6.0-compute" + +; CHECK: @dx.rts0 = private constant [248 x i8] c"{{.*}}", section "RTS0", align 4 + +define void @main() #0 { +entry: + ret void +} +attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + + +!dx.rootsignatures = !{!2} ; list of function/root signature pairs +!2 = !{ ptr @main, !3, i32 3 } ; function, root signature +!3 = !{ !5, !6, !7, !8 } ; list of root signature elements +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 
2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 1 } +!6 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 43, i32 0, i32 0, i32 2 } +!7 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 44, i32 0, i32 0, i32 0 } +!8 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 45, i32 0, i32 0, i32 3 } + +; DXC: - Name: RTS0 +; DXC-NEXT: Size: 248 +; DXC-NEXT: RootSignature: +; DXC-NEXT: Version: 3 +; DXC-NEXT: NumRootParameters: 0 +; DXC-NEXT: RootParametersOffset: 24 +; DXC-NEXT: NumStaticSamplers: 4 +; DXC-NEXT: StaticSamplersOffset: 24 +; DXC-NEXT: Parameters: [] +; DXC-NEXT: Samplers: +; DXC-LABEL: ShaderRegister: 42 +; DXC: SAMPLER_FLAG_UINT_BORDER_COLOR: true +; DXC-LABEL: ShaderRegister: 43 +; DXC: SAMPLER_FLAG_NON_NORMALIZED_COORDINATES: true +; DXC-LABEL: ShaderRegister: 44 +; DXC-NOT: SAMPLER_FLAG_NON_NORMALIZED_COORDINATES: +; DXC-NOT: SAMPLER_FLAG_UINT_BORDER_COLOR: +; DXC-LABEL: ShaderRegister: 45 +; DXC: SAMPLER_FLAG_UINT_BORDER_COLOR: true +; DXC-NEXT: SAMPLER_FLAG_NON_NORMALIZED_COORDINATES: true diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll index c244095..b68606d 100644 --- a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll @@ -10,6 +10,6 @@ entry: !0 = !{ptr @CSMain, !1, i32 2} !1 = !{!2, !3} -!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0 } +!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float 
-1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0, i32 0 } !3 = !{!"DescriptorTable", i32 0, !4} !4 = !{!"Sampler", i32 1, i32 42, i32 0, i32 -1, i32 0} diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll index 9ac02ebb..7c836e2 100644 --- a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll @@ -10,5 +10,5 @@ entry: !0 = !{ptr @CSMain, !1, i32 2} !1 = !{!2, !3} -!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0 } -!3 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0, i32 0 } +!3 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll b/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll index 7c9f375..40d36fb 100644 --- a/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll +++ b/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll @@ -97,7 +97,6 @@ entry: ; ALL: lw $[[R0:[0-9]+]], %got(v4f32)( ; ALL: ld.w $w12, 0($[[R0]]) ; ALL: move.v $w[[W0:13]], $w12 -; NOODDSPREG: move.v $w[[W0:12]], $w13 ; ALL: teqi $zero, 1 ; ALL-NOT: st.w ; ALL-NOT: ld.w diff --git a/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll b/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll index a2ad294..98314a0 100644 --- a/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll +++ b/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll @@ -897,31 +897,31 @@ 
define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) { ; P8LE-NEXT: mfvsrd r6, v2 ; P8LE-NEXT: mfvsrd r8, v3 ; P8LE-NEXT: ori r3, r3, 51289 +; P8LE-NEXT: mffprd r4, f0 ; P8LE-NEXT: ori r5, r5, 42889 -; P8LE-NEXT: rldic r4, r3, 36, 1 -; P8LE-NEXT: mffprd r3, f0 +; P8LE-NEXT: rldic r3, r3, 36, 1 ; P8LE-NEXT: rldic r5, r5, 35, 1 ; P8LE-NEXT: rldicl r7, r6, 63, 1 -; P8LE-NEXT: oris r4, r4, 45590 +; P8LE-NEXT: oris r3, r3, 45590 ; P8LE-NEXT: oris r5, r5, 1603 -; P8LE-NEXT: ori r4, r4, 17097 +; P8LE-NEXT: ori r3, r3, 17097 ; P8LE-NEXT: ori r5, r5, 21445 -; P8LE-NEXT: mulhdu r4, r3, r4 +; P8LE-NEXT: mulhdu r3, r4, r3 ; P8LE-NEXT: mulhdu r5, r7, r5 -; P8LE-NEXT: sub r7, r3, r4 +; P8LE-NEXT: sub r7, r4, r3 ; P8LE-NEXT: rldicl r5, r5, 57, 7 ; P8LE-NEXT: rldicl r7, r7, 63, 1 ; P8LE-NEXT: mulli r5, r5, 654 -; P8LE-NEXT: add r4, r7, r4 +; P8LE-NEXT: add r3, r7, r3 ; P8LE-NEXT: lis r7, -16037 ; P8LE-NEXT: ori r7, r7, 28749 -; P8LE-NEXT: rldicl r4, r4, 60, 4 +; P8LE-NEXT: rldicl r3, r3, 60, 4 ; P8LE-NEXT: sub r5, r6, r5 ; P8LE-NEXT: rldic r7, r7, 32, 0 -; P8LE-NEXT: mulli r4, r4, 23 +; P8LE-NEXT: mulli r3, r3, 23 ; P8LE-NEXT: oris r7, r7, 52170 ; P8LE-NEXT: ori r7, r7, 12109 -; P8LE-NEXT: sub r3, r3, r4 +; P8LE-NEXT: sub r3, r4, r3 ; P8LE-NEXT: mulhdu r7, r8, r7 ; P8LE-NEXT: mtfprd f1, r3 ; P8LE-NEXT: li r3, 0 diff --git a/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll index 435b0ab..816b12e 100644 --- a/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll +++ b/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll @@ -35,12 +35,12 @@ define i64 @test2elt(<2 x i64> %a) local_unnamed_addr #0 { ; ; CHECK-BE-LABEL: test2elt: ; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: xscvuxdsp f0, v2 +; CHECK-BE-NEXT: xscvdpspn v3, f0 ; CHECK-BE-NEXT: xxswapd vs0, v2 -; CHECK-BE-NEXT: xscvuxdsp f1, v2 ; CHECK-BE-NEXT: xscvuxdsp f0, f0 -; CHECK-BE-NEXT: xscvdpspn v2, f1 -; CHECK-BE-NEXT: xscvdpspn v3, f0 -; CHECK-BE-NEXT: vmrgow 
v2, v2, v3 +; CHECK-BE-NEXT: xscvdpspn v2, f0 +; CHECK-BE-NEXT: vmrgow v2, v3, v2 ; CHECK-BE-NEXT: mfvsrd r3, v2 ; CHECK-BE-NEXT: blr entry: @@ -327,12 +327,12 @@ define i64 @test2elt_signed(<2 x i64> %a) local_unnamed_addr #0 { ; ; CHECK-BE-LABEL: test2elt_signed: ; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: xscvsxdsp f0, v2 +; CHECK-BE-NEXT: xscvdpspn v3, f0 ; CHECK-BE-NEXT: xxswapd vs0, v2 -; CHECK-BE-NEXT: xscvsxdsp f1, v2 ; CHECK-BE-NEXT: xscvsxdsp f0, f0 -; CHECK-BE-NEXT: xscvdpspn v2, f1 -; CHECK-BE-NEXT: xscvdpspn v3, f0 -; CHECK-BE-NEXT: vmrgow v2, v2, v3 +; CHECK-BE-NEXT: xscvdpspn v2, f0 +; CHECK-BE-NEXT: vmrgow v2, v3, v2 ; CHECK-BE-NEXT: mfvsrd r3, v2 ; CHECK-BE-NEXT: blr entry: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll index 9a1ed8f..1d5d918 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll @@ -37,7 +37,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind { ; ; RV32IA-LABEL: atomic_load_i8_unordered: ; RV32IA: # %bb.0: -; RV32IA-NEXT: lb a0, 0(a0) +; RV32IA-NEXT: lbu a0, 0(a0) ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomic_load_i8_unordered: @@ -52,7 +52,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind { ; ; RV64IA-LABEL: atomic_load_i8_unordered: ; RV64IA: # %bb.0: -; RV64IA-NEXT: lb a0, 0(a0) +; RV64IA-NEXT: lbu a0, 0(a0) ; RV64IA-NEXT: ret %1 = load atomic i8, ptr %a unordered, align 1 ret i8 %1 @@ -71,7 +71,7 @@ define i8 @atomic_load_i8_monotonic(ptr %a) nounwind { ; ; RV32IA-LABEL: atomic_load_i8_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: lb a0, 0(a0) +; RV32IA-NEXT: lbu a0, 0(a0) ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomic_load_i8_monotonic: @@ -86,7 +86,7 @@ define i8 @atomic_load_i8_monotonic(ptr %a) nounwind { ; ; RV64IA-LABEL: atomic_load_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: lb a0, 0(a0) +; RV64IA-NEXT: lbu a0, 0(a0) ; RV64IA-NEXT: ret %1 = load 
atomic i8, ptr %a monotonic, align 1 ret i8 %1 @@ -105,13 +105,13 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind { ; ; RV32IA-WMO-LABEL: atomic_load_i8_acquire: ; RV32IA-WMO: # %bb.0: -; RV32IA-WMO-NEXT: lb a0, 0(a0) +; RV32IA-WMO-NEXT: lbu a0, 0(a0) ; RV32IA-WMO-NEXT: fence r, rw ; RV32IA-WMO-NEXT: ret ; ; RV32IA-TSO-LABEL: atomic_load_i8_acquire: ; RV32IA-TSO: # %bb.0: -; RV32IA-TSO-NEXT: lb a0, 0(a0) +; RV32IA-TSO-NEXT: lbu a0, 0(a0) ; RV32IA-TSO-NEXT: ret ; ; RV64I-LABEL: atomic_load_i8_acquire: @@ -126,35 +126,35 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind { ; ; RV64IA-WMO-LABEL: atomic_load_i8_acquire: ; RV64IA-WMO: # %bb.0: -; RV64IA-WMO-NEXT: lb a0, 0(a0) +; RV64IA-WMO-NEXT: lbu a0, 0(a0) ; RV64IA-WMO-NEXT: fence r, rw ; RV64IA-WMO-NEXT: ret ; ; RV64IA-TSO-LABEL: atomic_load_i8_acquire: ; RV64IA-TSO: # %bb.0: -; RV64IA-TSO-NEXT: lb a0, 0(a0) +; RV64IA-TSO-NEXT: lbu a0, 0(a0) ; RV64IA-TSO-NEXT: ret ; ; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire: ; RV32IA-WMO-TRAILING-FENCE: # %bb.0: -; RV32IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV32IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV32IA-WMO-TRAILING-FENCE-NEXT: fence r, rw ; RV32IA-WMO-TRAILING-FENCE-NEXT: ret ; ; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire: ; RV32IA-TSO-TRAILING-FENCE: # %bb.0: -; RV32IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV32IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV32IA-TSO-TRAILING-FENCE-NEXT: ret ; ; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire: ; RV64IA-WMO-TRAILING-FENCE: # %bb.0: -; RV64IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV64IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV64IA-WMO-TRAILING-FENCE-NEXT: fence r, rw ; RV64IA-WMO-TRAILING-FENCE-NEXT: ret ; ; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire: ; RV64IA-TSO-TRAILING-FENCE: # %bb.0: -; RV64IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV64IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV64IA-TSO-TRAILING-FENCE-NEXT: ret %1 = load atomic i8, ptr %a 
acquire, align 1 ret i8 %1 @@ -174,14 +174,14 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind { ; RV32IA-WMO-LABEL: atomic_load_i8_seq_cst: ; RV32IA-WMO: # %bb.0: ; RV32IA-WMO-NEXT: fence rw, rw -; RV32IA-WMO-NEXT: lb a0, 0(a0) +; RV32IA-WMO-NEXT: lbu a0, 0(a0) ; RV32IA-WMO-NEXT: fence r, rw ; RV32IA-WMO-NEXT: ret ; ; RV32IA-TSO-LABEL: atomic_load_i8_seq_cst: ; RV32IA-TSO: # %bb.0: ; RV32IA-TSO-NEXT: fence rw, rw -; RV32IA-TSO-NEXT: lb a0, 0(a0) +; RV32IA-TSO-NEXT: lbu a0, 0(a0) ; RV32IA-TSO-NEXT: ret ; ; RV64I-LABEL: atomic_load_i8_seq_cst: @@ -197,40 +197,40 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind { ; RV64IA-WMO-LABEL: atomic_load_i8_seq_cst: ; RV64IA-WMO: # %bb.0: ; RV64IA-WMO-NEXT: fence rw, rw -; RV64IA-WMO-NEXT: lb a0, 0(a0) +; RV64IA-WMO-NEXT: lbu a0, 0(a0) ; RV64IA-WMO-NEXT: fence r, rw ; RV64IA-WMO-NEXT: ret ; ; RV64IA-TSO-LABEL: atomic_load_i8_seq_cst: ; RV64IA-TSO: # %bb.0: ; RV64IA-TSO-NEXT: fence rw, rw -; RV64IA-TSO-NEXT: lb a0, 0(a0) +; RV64IA-TSO-NEXT: lbu a0, 0(a0) ; RV64IA-TSO-NEXT: ret ; ; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst: ; RV32IA-WMO-TRAILING-FENCE: # %bb.0: ; RV32IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw -; RV32IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV32IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV32IA-WMO-TRAILING-FENCE-NEXT: fence r, rw ; RV32IA-WMO-TRAILING-FENCE-NEXT: ret ; ; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst: ; RV32IA-TSO-TRAILING-FENCE: # %bb.0: ; RV32IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw -; RV32IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV32IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV32IA-TSO-TRAILING-FENCE-NEXT: ret ; ; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst: ; RV64IA-WMO-TRAILING-FENCE: # %bb.0: ; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw -; RV64IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV64IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV64IA-WMO-TRAILING-FENCE-NEXT: fence r, rw ; RV64IA-WMO-TRAILING-FENCE-NEXT: ret ; ; 
RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst: ; RV64IA-TSO-TRAILING-FENCE: # %bb.0: ; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw -; RV64IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV64IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV64IA-TSO-TRAILING-FENCE-NEXT: ret %1 = load atomic i8, ptr %a seq_cst, align 1 ret i8 %1 diff --git a/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll b/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll index f8b1d50..edec1d0 100644 --- a/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll +++ b/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll @@ -11,6 +11,8 @@ ; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-NOZICOND,SFB-NOZICOND-C %s ; RUN: llc -mtriple=riscv64 -mattr=+short-forward-branch-opt,+zicond -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-ZICOND %s +; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \ +; RUN: | FileCheck %s --check-prefixes=RV32IXQCI ; The conditional move optimization in sifive-p450 requires that only a ; single c.mv instruction appears in the branch shadow. 
@@ -42,6 +44,14 @@ define signext i32 @test1(i32 signext %x, i32 signext %y, i32 signext %z) { ; SHORT_FORWARD-NEXT: xor a0, a0, a1 ; SHORT_FORWARD-NEXT: .LBB0_2: ; SHORT_FORWARD-NEXT: ret +; +; RV32IXQCI-LABEL: test1: +; RV32IXQCI: # %bb.0: +; RV32IXQCI-NEXT: bnez a2, .LBB0_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB0_2: +; RV32IXQCI-NEXT: ret %c = icmp eq i32 %z, 0 %a = xor i32 %x, %y %b = select i1 %c, i32 %a, i32 %x @@ -73,6 +83,14 @@ define signext i32 @test2(i32 signext %x, i32 signext %y, i32 signext %z) { ; SHORT_FORWARD-NEXT: xor a0, a0, a1 ; SHORT_FORWARD-NEXT: .LBB1_2: ; SHORT_FORWARD-NEXT: ret +; +; RV32IXQCI-LABEL: test2: +; RV32IXQCI: # %bb.0: +; RV32IXQCI-NEXT: beqz a2, .LBB1_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB1_2: +; RV32IXQCI-NEXT: ret %c = icmp eq i32 %z, 0 %a = xor i32 %x, %y %b = select i1 %c, i32 %x, i32 %a @@ -120,6 +138,19 @@ define signext i32 @test3(i32 signext %v, i32 signext %w, i32 signext %x, i32 si ; SHORT_FORWARD-NEXT: .LBB2_4: ; SHORT_FORWARD-NEXT: addw a0, a0, a2 ; SHORT_FORWARD-NEXT: ret +; +; RV32IXQCI-LABEL: test3: +; RV32IXQCI: # %bb.0: +; RV32IXQCI-NEXT: beqz a4, .LBB2_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB2_2: +; RV32IXQCI-NEXT: beqz a4, .LBB2_4 +; RV32IXQCI-NEXT: # %bb.3: +; RV32IXQCI-NEXT: xor a2, a2, a3 +; RV32IXQCI-NEXT: .LBB2_4: +; RV32IXQCI-NEXT: add a0, a0, a2 +; RV32IXQCI-NEXT: ret %c = icmp eq i32 %z, 0 %a = xor i32 %v, %w %b = select i1 %c, i32 %v, i32 %a @@ -167,6 +198,12 @@ define signext i32 @test4(i32 signext %x, i32 signext %y, i32 signext %z) { ; SFB-ZICOND-NEXT: li a0, 3 ; SFB-ZICOND-NEXT: czero.nez a0, a0, a2 ; SFB-ZICOND-NEXT: ret +; +; RV32IXQCI-LABEL: test4: +; RV32IXQCI: # %bb.0: +; RV32IXQCI-NEXT: li a0, 0 +; RV32IXQCI-NEXT: qc.lieqi a0, a2, 0, 3 +; RV32IXQCI-NEXT: ret %c = icmp eq i32 %z, 0 %a = select i1 %c, i32 3, i32 0 ret i32 %a @@ -199,6 +236,15 @@ define 
i16 @select_xor_1(i16 %A, i8 %cond) { ; SHORT_FORWARD-NEXT: xori a0, a0, 43 ; SHORT_FORWARD-NEXT: .LBB4_2: # %entry ; SHORT_FORWARD-NEXT: ret +; +; RV32IXQCI-LABEL: select_xor_1: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a1, a1, 1 +; RV32IXQCI-NEXT: beqz a1, .LBB4_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xori a0, a0, 43 +; RV32IXQCI-NEXT: .LBB4_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp eq i8 %and, 0 @@ -236,6 +282,15 @@ define i16 @select_xor_1b(i16 %A, i8 %cond) { ; SHORT_FORWARD-NEXT: xori a0, a0, 43 ; SHORT_FORWARD-NEXT: .LBB5_2: # %entry ; SHORT_FORWARD-NEXT: ret +; +; RV32IXQCI-LABEL: select_xor_1b: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a1, a1, 1 +; RV32IXQCI-NEXT: beqz a1, .LBB5_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xori a0, a0, 43 +; RV32IXQCI-NEXT: .LBB5_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp ne i8 %and, 1 @@ -289,6 +344,15 @@ define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) { ; SFB-ZICOND-NEXT: xor a0, a1, a0 ; SFB-ZICOND-NEXT: .LBB6_2: # %entry ; SFB-ZICOND-NEXT: ret +; +; RV32IXQCI-LABEL: select_xor_2: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB6_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB6_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp eq i8 %and, 0 @@ -344,6 +408,15 @@ define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) { ; SFB-ZICOND-NEXT: xor a0, a1, a0 ; SFB-ZICOND-NEXT: .LBB7_2: # %entry ; SFB-ZICOND-NEXT: ret +; +; RV32IXQCI-LABEL: select_xor_2b: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB7_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB7_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp ne i8 %and, 1 @@ -397,6 +470,15 @@ define i32 
@select_or(i32 %A, i32 %B, i8 %cond) { ; SFB-ZICOND-NEXT: or a0, a1, a0 ; SFB-ZICOND-NEXT: .LBB8_2: # %entry ; SFB-ZICOND-NEXT: ret +; +; RV32IXQCI-LABEL: select_or: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB8_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB8_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp eq i8 %and, 0 @@ -452,6 +534,15 @@ define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) { ; SFB-ZICOND-NEXT: or a0, a1, a0 ; SFB-ZICOND-NEXT: .LBB9_2: # %entry ; SFB-ZICOND-NEXT: ret +; +; RV32IXQCI-LABEL: select_or_b: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB9_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB9_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp ne i8 %and, 1 @@ -505,6 +596,15 @@ define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) { ; SFB-ZICOND-NEXT: or a0, a1, a0 ; SFB-ZICOND-NEXT: .LBB10_2: # %entry ; SFB-ZICOND-NEXT: ret +; +; RV32IXQCI-LABEL: select_or_1: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB10_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB10_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i32 %cond, 1 %cmp10 = icmp eq i32 %and, 0 @@ -560,6 +660,15 @@ define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) { ; SFB-ZICOND-NEXT: or a0, a1, a0 ; SFB-ZICOND-NEXT: .LBB11_2: # %entry ; SFB-ZICOND-NEXT: ret +; +; RV32IXQCI-LABEL: select_or_1b: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB11_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB11_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i32 %cond, 1 %cmp10 = icmp ne i32 %and, 1 diff --git 
a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll index 9173fa4..cc1282a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/expandload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll @@ -1666,20 +1666,20 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: .LBB61_32: # %else114 ; CHECK-RV32-NEXT: slli a2, a3, 1 ; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma -; CHECK-RV32-NEXT: vsrl.vx v16, v0, a1 +; CHECK-RV32-NEXT: vsrl.vx v24, v0, a1 ; CHECK-RV32-NEXT: bgez a2, .LBB61_34 ; CHECK-RV32-NEXT: # %bb.33: # %cond.load117 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v24, v8 +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a2 ; CHECK-RV32-NEXT: vsetivli zero, 31, e8, m1, tu, ma ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 30 ; CHECK-RV32-NEXT: addi a0, a0, 1 -; CHECK-RV32-NEXT: vmv1r.v v24, v8 -; CHECK-RV32-NEXT: vmv8r.v v8, v24 +; CHECK-RV32-NEXT: vmv1r.v v16, v8 +; CHECK-RV32-NEXT: vmv8r.v v8, v16 ; CHECK-RV32-NEXT: .LBB61_34: # %else118 ; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma -; CHECK-RV32-NEXT: vmv.x.s a2, v16 +; CHECK-RV32-NEXT: vmv.x.s a2, v24 ; CHECK-RV32-NEXT: bgez a3, .LBB61_35 ; CHECK-RV32-NEXT: j .LBB61_572 ; CHECK-RV32-NEXT: .LBB61_35: # %else122 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll index 1d691b1..a2fcd79 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll @@ -661,8 +661,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v16, v8, v0 ; RV32-NEXT: csrr a7, vlenb -; RV32-NEXT: li t3, 36 -; RV32-NEXT: mul a7, a7, t3 +; RV32-NEXT: slli a7, a7, 5 ; RV32-NEXT: add a7, sp, a7 ; RV32-NEXT: addi a7, a7, 16 ; RV32-NEXT: vs8r.v v8, (a7) # 
vscale x 64-byte Folded Spill @@ -682,7 +681,11 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vl8r.v v8, (t1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vmerge.vvm v8, v24, v8, v0 -; RV32-NEXT: addi t1, sp, 16 +; RV32-NEXT: csrr t1, vlenb +; RV32-NEXT: li t2, 44 +; RV32-NEXT: mul t1, t1, t2 +; RV32-NEXT: add t1, sp, t1 +; RV32-NEXT: addi t1, t1, 16 ; RV32-NEXT: vs4r.v v8, (t1) # vscale x 32-byte Folded Spill ; RV32-NEXT: vmv.s.x v0, a7 ; RV32-NEXT: addi a3, a3, 12 @@ -694,8 +697,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v16, v24, v0 ; RV32-NEXT: csrr a7, vlenb -; RV32-NEXT: li t1, 20 -; RV32-NEXT: mul a7, a7, t1 +; RV32-NEXT: slli a7, a7, 4 ; RV32-NEXT: add a7, sp, a7 ; RV32-NEXT: addi a7, a7, 16 ; RV32-NEXT: vs8r.v v8, (a7) # vscale x 64-byte Folded Spill @@ -733,7 +735,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v8, v16, v0 ; RV32-NEXT: csrr a7, vlenb -; RV32-NEXT: li t0, 28 +; RV32-NEXT: li t0, 24 ; RV32-NEXT: mul a7, a7, t0 ; RV32-NEXT: add a7, sp, a7 ; RV32-NEXT: addi a7, a7, 16 @@ -755,7 +757,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vmerge.vvm v8, v24, v8, v0 ; RV32-NEXT: csrr a6, vlenb -; RV32-NEXT: li a7, 44 +; RV32-NEXT: li a7, 40 ; RV32-NEXT: mul a6, a6, a7 ; RV32-NEXT: add a6, sp, a6 ; RV32-NEXT: addi a6, a6, 16 @@ -772,24 +774,19 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v8, v16, v0 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a4, 12 -; RV32-NEXT: mul a1, a1, a4 +; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: 
add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV32-NEXT: vmv.s.x v0, a3 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a3, 36 -; RV32-NEXT: mul a1, a1, a3 +; RV32-NEXT: slli a1, a1, 5 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vrgatherei16.vv v24, v8, v6 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 2 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: addi a1, sp, 16 ; RV32-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: li a3, 92 @@ -812,8 +809,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vs4r.v v8, (a1) # vscale x 32-byte Folded Spill ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a3, 20 -; RV32-NEXT: mul a1, a1, a3 +; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload @@ -835,12 +831,6 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v8, v16, v0 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 84 -; RV32-NEXT: mul a1, a1, a2 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill -; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: li a2, 72 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 @@ -860,30 +850,36 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vs4r.v v28, (a1) # vscale x 32-byte Folded Spill +; RV32-NEXT: addi a1, sp, 16 +; RV32-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: li a2, 60 ; 
RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vl4r.v v16, (a1) # vscale x 32-byte Folded Reload +; RV32-NEXT: vl4r.v v20, (a1) # vscale x 32-byte Folded Reload +; RV32-NEXT: vmv.v.v v20, v16 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 2 +; RV32-NEXT: li a2, 60 +; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload -; RV32-NEXT: vmv.v.v v16, v8 +; RV32-NEXT: vs4r.v v20, (a1) # vscale x 32-byte Folded Spill ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 60 +; RV32-NEXT: li a2, 44 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vs4r.v v16, (a1) # vscale x 32-byte Folded Spill -; RV32-NEXT: addi a1, sp, 16 -; RV32-NEXT: vl4r.v v8, (a1) # vscale x 32-byte Folded Reload +; RV32-NEXT: vl4r.v v16, (a1) # vscale x 32-byte Folded Reload ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma -; RV32-NEXT: vrgatherei16.vv v28, v8, v3 +; RV32-NEXT: vrgatherei16.vv v20, v16, v3 ; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma -; RV32-NEXT: vmv.v.v v28, v24 +; RV32-NEXT: vmv.v.v v20, v24 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a1, a1, 6 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs4r.v v20, (a1) # vscale x 32-byte Folded Spill ; RV32-NEXT: lui a1, %hi(.LCPI27_4) ; RV32-NEXT: addi a1, a1, %lo(.LCPI27_4) ; RV32-NEXT: lui a2, %hi(.LCPI27_5) @@ -891,13 +887,25 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; RV32-NEXT: vle16.v v24, (a2) ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV32-NEXT: vle16.v v8, (a1) +; RV32-NEXT: vle16.v v16, (a1) +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 84 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs1r.v v16, (a1) # vscale x 8-byte Folded Spill ; RV32-NEXT: lui a1, 
%hi(.LCPI27_7) ; RV32-NEXT: addi a1, a1, %lo(.LCPI27_7) ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV32-NEXT: vle16.v v10, (a1) +; RV32-NEXT: vle16.v v16, (a1) ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 28 +; RV32-NEXT: li a2, 76 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs2r.v v16, (a1) # vscale x 16-byte Folded Spill +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 24 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 @@ -909,18 +917,29 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl4r.v v20, (a1) # vscale x 32-byte Folded Reload +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 84 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vl1r.v v7, (a1) # vscale x 8-byte Folded Reload ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma -; RV32-NEXT: vrgatherei16.vv v24, v20, v8 +; RV32-NEXT: vrgatherei16.vv v24, v20, v7 ; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma ; RV32-NEXT: vmv.v.v v24, v16 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 12 -; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl8r.v v0, (a1) # vscale x 64-byte Folded Reload +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 76 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vl2r.v v28, (a1) # vscale x 16-byte Folded Reload ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV32-NEXT: vrgatherei16.vv v16, v0, v10 +; RV32-NEXT: vrgatherei16.vv v16, v0, v28 ; RV32-NEXT: lui a1, %hi(.LCPI27_6) ; RV32-NEXT: addi a1, a1, %lo(.LCPI27_6) ; RV32-NEXT: lui a2, %hi(.LCPI27_8) @@ -934,7 +953,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma 
; RV32-NEXT: vle16.v v5, (a2) ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 44 +; RV32-NEXT: li a2, 40 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 @@ -942,12 +961,6 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vrgatherei16.vv v0, v20, v4 ; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma ; RV32-NEXT: vmv.v.v v0, v16 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 84 -; RV32-NEXT: mul a1, a1, a2 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vrgatherei16.vv v16, v8, v6 ; RV32-NEXT: csrr a1, vlenb @@ -968,7 +981,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: addi a1, a0, 192 ; RV32-NEXT: vse32.v v24, (a1) ; RV32-NEXT: addi a1, a0, 128 -; RV32-NEXT: vse32.v v28, (a1) +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: slli a2, a2, 6 +; RV32-NEXT: add a2, sp, a2 +; RV32-NEXT: addi a2, a2, 16 +; RV32-NEXT: vl4r.v v8, (a2) # vscale x 32-byte Folded Reload +; RV32-NEXT: vse32.v v8, (a1) ; RV32-NEXT: addi a1, a0, 64 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: li a3, 60 diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll index d995a31..acc6849 100644 --- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll @@ -416,14 +416,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) { ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v12, v8, 1, v0 +; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV32-BITS-UNKNOWN-NEXT: 
vrgatherei16.vv v11, v12, v16 -; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v13, v16 -; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v14, v16 -; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v15, v16 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v8, v16 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v9, v16 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v16 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v16 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv32i1: @@ -437,14 +437,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) { ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-256-NEXT: vmerge.vim v12, v8, 1, v0 +; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV32-BITS-256-NEXT: vrgather.vv v11, v12, v16 -; RV32-BITS-256-NEXT: vrgather.vv v10, v13, v16 -; RV32-BITS-256-NEXT: vrgather.vv v9, v14, v16 -; RV32-BITS-256-NEXT: vrgather.vv v8, v15, v16 +; RV32-BITS-256-NEXT: vrgather.vv v15, v8, v16 +; RV32-BITS-256-NEXT: vrgather.vv v14, v9, v16 +; RV32-BITS-256-NEXT: vrgather.vv v13, v10, v16 +; RV32-BITS-256-NEXT: vrgather.vv v12, v11, v16 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-256-NEXT: vmsne.vi v0, v12, 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv32i1: @@ -458,14 +458,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) { ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-512-NEXT: vmerge.vim v12, v8, 1, v0 +; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; 
RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV32-BITS-512-NEXT: vrgather.vv v11, v12, v16 -; RV32-BITS-512-NEXT: vrgather.vv v10, v13, v16 -; RV32-BITS-512-NEXT: vrgather.vv v9, v14, v16 -; RV32-BITS-512-NEXT: vrgather.vv v8, v15, v16 +; RV32-BITS-512-NEXT: vrgather.vv v15, v8, v16 +; RV32-BITS-512-NEXT: vrgather.vv v14, v9, v16 +; RV32-BITS-512-NEXT: vrgather.vv v13, v10, v16 +; RV32-BITS-512-NEXT: vrgather.vv v12, v11, v16 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-512-NEXT: vmsne.vi v0, v12, 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i1: @@ -479,14 +479,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) { ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v12, v8, 1, v0 +; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v12, v16 -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v13, v16 -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v14, v16 -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v15, v16 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v8, v16 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v9, v16 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v16 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v16 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv32i1: @@ -500,14 +500,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) { ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; 
RV64-BITS-256-NEXT: vmerge.vim v12, v8, 1, v0 +; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV64-BITS-256-NEXT: vrgather.vv v11, v12, v16 -; RV64-BITS-256-NEXT: vrgather.vv v10, v13, v16 -; RV64-BITS-256-NEXT: vrgather.vv v9, v14, v16 -; RV64-BITS-256-NEXT: vrgather.vv v8, v15, v16 +; RV64-BITS-256-NEXT: vrgather.vv v15, v8, v16 +; RV64-BITS-256-NEXT: vrgather.vv v14, v9, v16 +; RV64-BITS-256-NEXT: vrgather.vv v13, v10, v16 +; RV64-BITS-256-NEXT: vrgather.vv v12, v11, v16 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-256-NEXT: vmsne.vi v0, v12, 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv32i1: @@ -521,14 +521,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) { ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-512-NEXT: vmerge.vim v12, v8, 1, v0 +; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV64-BITS-512-NEXT: vrgather.vv v11, v12, v16 -; RV64-BITS-512-NEXT: vrgather.vv v10, v13, v16 -; RV64-BITS-512-NEXT: vrgather.vv v9, v14, v16 -; RV64-BITS-512-NEXT: vrgather.vv v8, v15, v16 +; RV64-BITS-512-NEXT: vrgather.vv v15, v8, v16 +; RV64-BITS-512-NEXT: vrgather.vv v14, v9, v16 +; RV64-BITS-512-NEXT: vrgather.vv v13, v10, v16 +; RV64-BITS-512-NEXT: vrgather.vv v12, v11, v16 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-512-NEXT: vmsne.vi v0, v12, 0 ; RV64-BITS-512-NEXT: ret %res = call <vscale x 32 x i1> @llvm.vector.reverse.nxv32i1(<vscale x 32 x i1> %a) ret <vscale x 32 x i1> %res diff --git a/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll b/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll index 4bc6313..1ee7e13 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll +++ b/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll @@ -37772,18 +37772,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_P1(<vscale x 64 x i8> %val, <vs ; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_P1: ; CHECK-RV32VC: # %bb.0: ; CHECK-RV32VC-NEXT: csrr a1, vlenb -; CHECK-RV32VC-NEXT: slli a5, a1, 4 +; CHECK-RV32VC-NEXT: slli a6, a1, 4 ; CHECK-RV32VC-NEXT: slli a2, a1, 2 -; CHECK-RV32VC-NEXT: slli a6, a1, 3 +; CHECK-RV32VC-NEXT: slli a5, a1, 3 ; CHECK-RV32VC-NEXT: mv a4, a3 ; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB915_2 ; CHECK-RV32VC-NEXT: # %bb.1: ; CHECK-RV32VC-NEXT: mv a4, a2 ; CHECK-RV32VC-NEXT: .LBB915_2: ; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0) -; CHECK-RV32VC-NEXT: add a7, a0, a5 +; CHECK-RV32VC-NEXT: add a6, a6, a0 ; CHECK-RV32VC-NEXT: slli a1, a1, 1 -; CHECK-RV32VC-NEXT: add a0, a0, a6 +; CHECK-RV32VC-NEXT: add a0, a0, a5 ; CHECK-RV32VC-NEXT: mv a5, a4 ; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB915_4 ; CHECK-RV32VC-NEXT: # %bb.3: @@ -37791,11 +37791,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_P1(<vscale x 64 x i8> %val, <vs ; CHECK-RV32VC-NEXT: .LBB915_4: ; CHECK-RV32VC-NEXT: addi sp, sp, -16 ; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16 -; CHECK-RV32VC-NEXT: csrr a6, vlenb -; CHECK-RV32VC-NEXT: slli a6, a6, 3 -; CHECK-RV32VC-NEXT: sub sp, sp, a6 +; CHECK-RV32VC-NEXT: csrr a7, vlenb +; CHECK-RV32VC-NEXT: slli a7, a7, 3 +; CHECK-RV32VC-NEXT: sub sp, sp, a7 ; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7) +; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6) ; CHECK-RV32VC-NEXT: addi a6, sp, 16 ; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill ; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0) @@ -38397,18 +38397,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_PALL(<vscale x 64 x i8> %val, < ; CHECK-RV32VC-LABEL: 
test_nontemporal_vp_scatter_nxv64i8_PALL: ; CHECK-RV32VC: # %bb.0: ; CHECK-RV32VC-NEXT: csrr a1, vlenb -; CHECK-RV32VC-NEXT: slli a5, a1, 4 +; CHECK-RV32VC-NEXT: slli a6, a1, 4 ; CHECK-RV32VC-NEXT: slli a2, a1, 2 -; CHECK-RV32VC-NEXT: slli a6, a1, 3 +; CHECK-RV32VC-NEXT: slli a5, a1, 3 ; CHECK-RV32VC-NEXT: mv a4, a3 ; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB916_2 ; CHECK-RV32VC-NEXT: # %bb.1: ; CHECK-RV32VC-NEXT: mv a4, a2 ; CHECK-RV32VC-NEXT: .LBB916_2: ; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0) -; CHECK-RV32VC-NEXT: add a7, a0, a5 +; CHECK-RV32VC-NEXT: add a6, a6, a0 ; CHECK-RV32VC-NEXT: slli a1, a1, 1 -; CHECK-RV32VC-NEXT: add a0, a0, a6 +; CHECK-RV32VC-NEXT: add a0, a0, a5 ; CHECK-RV32VC-NEXT: mv a5, a4 ; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB916_4 ; CHECK-RV32VC-NEXT: # %bb.3: @@ -38416,11 +38416,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_PALL(<vscale x 64 x i8> %val, < ; CHECK-RV32VC-NEXT: .LBB916_4: ; CHECK-RV32VC-NEXT: addi sp, sp, -16 ; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16 -; CHECK-RV32VC-NEXT: csrr a6, vlenb -; CHECK-RV32VC-NEXT: slli a6, a6, 3 -; CHECK-RV32VC-NEXT: sub sp, sp, a6 +; CHECK-RV32VC-NEXT: csrr a7, vlenb +; CHECK-RV32VC-NEXT: slli a7, a7, 3 +; CHECK-RV32VC-NEXT: sub sp, sp, a7 ; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7) +; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6) ; CHECK-RV32VC-NEXT: addi a6, sp, 16 ; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill ; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0) @@ -39022,18 +39022,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_S1(<vscale x 64 x i8> %val, <vs ; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_S1: ; CHECK-RV32VC: # %bb.0: ; CHECK-RV32VC-NEXT: csrr a1, vlenb -; CHECK-RV32VC-NEXT: slli a5, a1, 4 +; CHECK-RV32VC-NEXT: slli a6, a1, 4 ; CHECK-RV32VC-NEXT: slli a2, a1, 2 -; CHECK-RV32VC-NEXT: slli a6, a1, 3 +; 
CHECK-RV32VC-NEXT: slli a5, a1, 3 ; CHECK-RV32VC-NEXT: mv a4, a3 ; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB917_2 ; CHECK-RV32VC-NEXT: # %bb.1: ; CHECK-RV32VC-NEXT: mv a4, a2 ; CHECK-RV32VC-NEXT: .LBB917_2: ; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0) -; CHECK-RV32VC-NEXT: add a7, a0, a5 +; CHECK-RV32VC-NEXT: add a6, a6, a0 ; CHECK-RV32VC-NEXT: slli a1, a1, 1 -; CHECK-RV32VC-NEXT: add a0, a0, a6 +; CHECK-RV32VC-NEXT: add a0, a0, a5 ; CHECK-RV32VC-NEXT: mv a5, a4 ; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB917_4 ; CHECK-RV32VC-NEXT: # %bb.3: @@ -39041,11 +39041,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_S1(<vscale x 64 x i8> %val, <vs ; CHECK-RV32VC-NEXT: .LBB917_4: ; CHECK-RV32VC-NEXT: addi sp, sp, -16 ; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16 -; CHECK-RV32VC-NEXT: csrr a6, vlenb -; CHECK-RV32VC-NEXT: slli a6, a6, 3 -; CHECK-RV32VC-NEXT: sub sp, sp, a6 +; CHECK-RV32VC-NEXT: csrr a7, vlenb +; CHECK-RV32VC-NEXT: slli a7, a7, 3 +; CHECK-RV32VC-NEXT: sub sp, sp, a7 ; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7) +; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6) ; CHECK-RV32VC-NEXT: addi a6, sp, 16 ; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill ; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0) @@ -39647,18 +39647,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_ALL(<vscale x 64 x i8> %val, <v ; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_ALL: ; CHECK-RV32VC: # %bb.0: ; CHECK-RV32VC-NEXT: csrr a1, vlenb -; CHECK-RV32VC-NEXT: slli a5, a1, 4 +; CHECK-RV32VC-NEXT: slli a6, a1, 4 ; CHECK-RV32VC-NEXT: slli a2, a1, 2 -; CHECK-RV32VC-NEXT: slli a6, a1, 3 +; CHECK-RV32VC-NEXT: slli a5, a1, 3 ; CHECK-RV32VC-NEXT: mv a4, a3 ; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB918_2 ; CHECK-RV32VC-NEXT: # %bb.1: ; CHECK-RV32VC-NEXT: mv a4, a2 ; CHECK-RV32VC-NEXT: .LBB918_2: ; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0) -; CHECK-RV32VC-NEXT: add 
a7, a0, a5 +; CHECK-RV32VC-NEXT: add a6, a6, a0 ; CHECK-RV32VC-NEXT: slli a1, a1, 1 -; CHECK-RV32VC-NEXT: add a0, a0, a6 +; CHECK-RV32VC-NEXT: add a0, a0, a5 ; CHECK-RV32VC-NEXT: mv a5, a4 ; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB918_4 ; CHECK-RV32VC-NEXT: # %bb.3: @@ -39666,11 +39666,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_ALL(<vscale x 64 x i8> %val, <v ; CHECK-RV32VC-NEXT: .LBB918_4: ; CHECK-RV32VC-NEXT: addi sp, sp, -16 ; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16 -; CHECK-RV32VC-NEXT: csrr a6, vlenb -; CHECK-RV32VC-NEXT: slli a6, a6, 3 -; CHECK-RV32VC-NEXT: sub sp, sp, a6 +; CHECK-RV32VC-NEXT: csrr a7, vlenb +; CHECK-RV32VC-NEXT: slli a7, a7, 3 +; CHECK-RV32VC-NEXT: sub sp, sp, a7 ; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7) +; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6) ; CHECK-RV32VC-NEXT: addi a6, sp, 16 ; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill ; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0) @@ -40271,18 +40271,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_DEFAULT(<vscale x 64 x i8> %val ; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_DEFAULT: ; CHECK-RV32VC: # %bb.0: ; CHECK-RV32VC-NEXT: csrr a1, vlenb -; CHECK-RV32VC-NEXT: slli a5, a1, 4 +; CHECK-RV32VC-NEXT: slli a6, a1, 4 ; CHECK-RV32VC-NEXT: slli a2, a1, 2 -; CHECK-RV32VC-NEXT: slli a6, a1, 3 +; CHECK-RV32VC-NEXT: slli a5, a1, 3 ; CHECK-RV32VC-NEXT: mv a4, a3 ; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB919_2 ; CHECK-RV32VC-NEXT: # %bb.1: ; CHECK-RV32VC-NEXT: mv a4, a2 ; CHECK-RV32VC-NEXT: .LBB919_2: ; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0) -; CHECK-RV32VC-NEXT: add a7, a0, a5 +; CHECK-RV32VC-NEXT: add a6, a6, a0 ; CHECK-RV32VC-NEXT: slli a1, a1, 1 -; CHECK-RV32VC-NEXT: add a0, a0, a6 +; CHECK-RV32VC-NEXT: add a0, a0, a5 ; CHECK-RV32VC-NEXT: mv a5, a4 ; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB919_4 ; CHECK-RV32VC-NEXT: # %bb.3: 
@@ -40290,11 +40290,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_DEFAULT(<vscale x 64 x i8> %val ; CHECK-RV32VC-NEXT: .LBB919_4: ; CHECK-RV32VC-NEXT: addi sp, sp, -16 ; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16 -; CHECK-RV32VC-NEXT: csrr a6, vlenb -; CHECK-RV32VC-NEXT: slli a6, a6, 3 -; CHECK-RV32VC-NEXT: sub sp, sp, a6 +; CHECK-RV32VC-NEXT: csrr a7, vlenb +; CHECK-RV32VC-NEXT: slli a7, a7, 3 +; CHECK-RV32VC-NEXT: sub sp, sp, a7 ; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7) +; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6) ; CHECK-RV32VC-NEXT: addi a6, sp, 16 ; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill ; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll index 02825b2..19a1841 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll @@ -6018,3 +6018,39 @@ vector.latch: ; preds = %for.body419 for.cond.cleanup: ; preds = %vector.latch ret void } + +;; This is exactly like sink_add_splat except that the splat has operands +;; which haven't been converted to undef. 
+define void @sink_non_canonical_splat(ptr nocapture %a, i32 signext %x) { +; CHECK-LABEL: sink_non_canonical_splat: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lui a2, 1 +; CHECK-NEXT: add a2, a0, a2 +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: .LBB131_1: # %vector.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vadd.vx v8, v8, a1 +; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: bne a0, a2, .LBB131_1 +; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: ret +entry: + %broadcast.splatinsert = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0 + %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer + br label %vector.body + +vector.body: ; preds = %vector.body, %entry + %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ] + %0 = getelementptr inbounds i32, ptr %a, i64 %index + %wide.load = load <4 x i32>, ptr %0, align 4 + %1 = add <4 x i32> %wide.load, %broadcast.splat + store <4 x i32> %1, ptr %0, align 4 + %index.next = add nuw i64 %index, 4 + %2 = icmp eq i64 %index.next, 1024 + br i1 %2, label %for.cond.cleanup, label %vector.body + +for.cond.cleanup: ; preds = %vector.body + ret void +} diff --git a/llvm/test/CodeGen/RISCV/select-bare.ll b/llvm/test/CodeGen/RISCV/select-bare.ll index 44028a7..550eb94 100644 --- a/llvm/test/CodeGen/RISCV/select-bare.ll +++ b/llvm/test/CodeGen/RISCV/select-bare.ll @@ -3,7 +3,7 @@ ; RUN: | FileCheck %s -check-prefix=RV32I ; RUN: llc -mtriple=riscv64 -mattr=+xmipscmov -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64I-CCMOV %s -; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s 
\ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCI define i32 @bare_select(i1 %a, i32 %b, i32 %c) nounwind { diff --git a/llvm/test/CodeGen/RISCV/select-cc.ll b/llvm/test/CodeGen/RISCV/select-cc.ll index b57f625..95f5a9d 100644 --- a/llvm/test/CodeGen/RISCV/select-cc.ll +++ b/llvm/test/CodeGen/RISCV/select-cc.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -disable-block-placement -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefixes=RV32I %s -; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCI ; RUN: llc -mtriple=riscv64 -disable-block-placement -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefixes=RV64I %s @@ -88,39 +88,38 @@ define signext i32 @foo(i32 signext %a, ptr %b) nounwind { ; RV32IXQCI-LABEL: foo: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: lw a2, 0(a1) -; RV32IXQCI-NEXT: lw a4, 0(a1) -; RV32IXQCI-NEXT: lw t5, 0(a1) -; RV32IXQCI-NEXT: lw t4, 0(a1) -; RV32IXQCI-NEXT: lw t3, 0(a1) -; RV32IXQCI-NEXT: lw t2, 0(a1) -; RV32IXQCI-NEXT: lw t0, 0(a1) -; RV32IXQCI-NEXT: lw a7, 0(a1) -; RV32IXQCI-NEXT: lw a6, 0(a1) ; RV32IXQCI-NEXT: lw a3, 0(a1) -; RV32IXQCI-NEXT: lw t1, 0(a1) +; RV32IXQCI-NEXT: lw a4, 0(a1) ; RV32IXQCI-NEXT: lw a5, 0(a1) -; RV32IXQCI-NEXT: bltz t1, .LBB0_2 +; RV32IXQCI-NEXT: qc.mvne a0, a0, a2, a2 +; RV32IXQCI-NEXT: qc.mveq a0, a0, a3, a3 +; RV32IXQCI-NEXT: lw a2, 0(a1) +; RV32IXQCI-NEXT: qc.mvgeu a0, a4, a0, a4 +; RV32IXQCI-NEXT: lw a3, 0(a1) +; RV32IXQCI-NEXT: qc.mvltu a0, a0, a5, a5 +; RV32IXQCI-NEXT: lw a4, 0(a1) +; RV32IXQCI-NEXT: qc.mvgeu a0, a0, a2, a2 +; RV32IXQCI-NEXT: lw a2, 0(a1) +; RV32IXQCI-NEXT: qc.mvltu a0, a3, a0, a3 +; RV32IXQCI-NEXT: 
lw a3, 0(a1) +; RV32IXQCI-NEXT: qc.mvge a0, a4, a0, a4 +; RV32IXQCI-NEXT: lw a4, 0(a1) +; RV32IXQCI-NEXT: qc.mvlt a0, a0, a2, a2 +; RV32IXQCI-NEXT: lw a2, 0(a1) +; RV32IXQCI-NEXT: qc.mvge a0, a0, a3, a3 +; RV32IXQCI-NEXT: lw a3, 0(a1) +; RV32IXQCI-NEXT: qc.mvlt a0, a4, a0, a4 +; RV32IXQCI-NEXT: lw a4, 0(a1) +; RV32IXQCI-NEXT: lw a1, 0(a1) +; RV32IXQCI-NEXT: blez a2, .LBB0_2 ; RV32IXQCI-NEXT: # %bb.1: -; RV32IXQCI-NEXT: li a5, 0 -; RV32IXQCI-NEXT: qc.mveq a2, a0, a2, a0 -; RV32IXQCI-NEXT: qc.mvne a4, a2, a4, a2 -; RV32IXQCI-NEXT: qc.mvltu t5, t5, a4, a4 -; RV32IXQCI-NEXT: qc.mvgeu t4, t5, t4, t5 -; RV32IXQCI-NEXT: qc.mvltu t3, t4, t3, t4 -; RV32IXQCI-NEXT: qc.mvgeu t2, t2, t3, t3 -; RV32IXQCI-NEXT: qc.mvlt t0, t0, t2, t2 -; RV32IXQCI-NEXT: qc.mvge a7, t0, a7, t0 -; RV32IXQCI-NEXT: qc.mvlt a6, a7, a6, a7 -; RV32IXQCI-NEXT: qc.mvge a3, a3, a6, a6 -; RV32IXQCI-NEXT: qc.mvlt a3, a5, t1, t1 -; RV32IXQCI-NEXT: mv a5, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: .LBB0_2: -; RV32IXQCI-NEXT: lw a2, 0(a1) -; RV32IXQCI-NEXT: lw a0, 0(a1) -; RV32IXQCI-NEXT: li a1, 1024 -; RV32IXQCI-NEXT: qc.mvlt a2, a1, a2, a5 -; RV32IXQCI-NEXT: li a1, 2046 -; RV32IXQCI-NEXT: qc.mvltu a0, a1, t1, a2 +; RV32IXQCI-NEXT: qc.mvlti a0, a2, 0, a3 +; RV32IXQCI-NEXT: li a3, 1024 +; RV32IXQCI-NEXT: qc.mvge a0, a3, a4, a4 +; RV32IXQCI-NEXT: li a3, 2046 +; RV32IXQCI-NEXT: qc.mvgeu a0, a3, a2, a1 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: foo: diff --git a/llvm/test/CodeGen/RISCV/select-cond.ll b/llvm/test/CodeGen/RISCV/select-cond.ll index 3ca0f46..a3c48737 100644 --- a/llvm/test/CodeGen/RISCV/select-cond.ll +++ b/llvm/test/CodeGen/RISCV/select-cond.ll @@ -7,7 +7,7 @@ ; RUN: | FileCheck %s --check-prefixes=RV32-XQCICM ; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcics -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32-XQCICS -; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \ +; RUN: llc 
-mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCI ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV64 diff --git a/llvm/test/CodeGen/RISCV/select-const.ll b/llvm/test/CodeGen/RISCV/select-const.ll index 65d10bb..dfac6e1 100644 --- a/llvm/test/CodeGen/RISCV/select-const.ll +++ b/llvm/test/CodeGen/RISCV/select-const.ll @@ -5,7 +5,7 @@ ; RUN: | FileCheck -check-prefixes=RV32,RV32IF %s ; RUN: llc -mtriple=riscv32 -mattr=+zicond -target-abi=ilp32 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefixes=RV32,RV32ZICOND %s -; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCI ; RUN: llc -mtriple=riscv64 -target-abi=lp64 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefixes=RV64,RV64I %s @@ -579,9 +579,9 @@ define i32 @select_slt_zero_constant1_constant2(i32 signext %x) { ; ; RV32IXQCI-LABEL: select_slt_zero_constant1_constant2: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: srai a0, a0, 31 -; RV32IXQCI-NEXT: andi a0, a0, 10 -; RV32IXQCI-NEXT: addi a0, a0, -3 +; RV32IXQCI-NEXT: li a1, -3 +; RV32IXQCI-NEXT: qc.lilti a1, a0, 0, 7 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_slt_zero_constant1_constant2: @@ -605,9 +605,9 @@ define i32 @select_sgt_negative_one_constant1_constant2(i32 signext %x) { ; ; RV32IXQCI-LABEL: select_sgt_negative_one_constant1_constant2: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: srai a0, a0, 31 -; RV32IXQCI-NEXT: andi a0, a0, -10 -; RV32IXQCI-NEXT: addi a0, a0, 7 +; RV32IXQCI-NEXT: li a1, -3 +; 
RV32IXQCI-NEXT: qc.ligei a1, a0, 0, 7 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_sgt_negative_one_constant1_constant2: @@ -653,12 +653,10 @@ define i32 @select_nonnegative_lui_addi(i32 signext %x) { ; ; RV32IXQCI-LABEL: select_nonnegative_lui_addi: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: mv a1, a0 -; RV32IXQCI-NEXT: lui a0, 4 -; RV32IXQCI-NEXT: bgez a1, .LBB21_2 -; RV32IXQCI-NEXT: # %bb.1: -; RV32IXQCI-NEXT: li a0, 25 -; RV32IXQCI-NEXT: .LBB21_2: +; RV32IXQCI-NEXT: lui a2, 4 +; RV32IXQCI-NEXT: li a1, 25 +; RV32IXQCI-NEXT: qc.mvgei a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: select_nonnegative_lui_addi: @@ -726,12 +724,10 @@ define i32 @select_nonnegative_lui_addi_swapped(i32 signext %x) { ; ; RV32IXQCI-LABEL: select_nonnegative_lui_addi_swapped: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: bgez a0, .LBB22_2 -; RV32IXQCI-NEXT: # %bb.1: -; RV32IXQCI-NEXT: lui a0, 4 -; RV32IXQCI-NEXT: ret -; RV32IXQCI-NEXT: .LBB22_2: -; RV32IXQCI-NEXT: li a0, 25 +; RV32IXQCI-NEXT: li a2, 25 +; RV32IXQCI-NEXT: lui a1, 4 +; RV32IXQCI-NEXT: qc.mvgei a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: select_nonnegative_lui_addi_swapped: @@ -801,13 +797,13 @@ define i32 @diff_shl_addi(i32 signext %x) { ; ; RV32IXQCI-LABEL: diff_shl_addi: ; RV32IXQCI: # %bb.0: +; RV32IXQCI-NEXT: lui a2, 4 +; RV32IXQCI-NEXT: li a1, 25 ; RV32IXQCI-NEXT: bgez a0, .LBB23_2 ; RV32IXQCI-NEXT: # %bb.1: -; RV32IXQCI-NEXT: lui a0, 4 -; RV32IXQCI-NEXT: addi a0, a0, 25 -; RV32IXQCI-NEXT: ret +; RV32IXQCI-NEXT: addi a1, a2, 25 ; RV32IXQCI-NEXT: .LBB23_2: -; RV32IXQCI-NEXT: li a0, 25 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: diff_shl_addi: @@ -876,13 +872,13 @@ define i32 @diff_shl_addi2(i32 signext %x) { ; ; RV32IXQCI-LABEL: diff_shl_addi2: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: bgez a0, .LBB24_2 +; RV32IXQCI-NEXT: lui a2, 4 +; RV32IXQCI-NEXT: li a1, 25 +; RV32IXQCI-NEXT: bltz a0, .LBB24_2 ; 
RV32IXQCI-NEXT: # %bb.1: -; RV32IXQCI-NEXT: li a0, 25 -; RV32IXQCI-NEXT: ret +; RV32IXQCI-NEXT: addi a1, a2, 25 ; RV32IXQCI-NEXT: .LBB24_2: -; RV32IXQCI-NEXT: lui a0, 4 -; RV32IXQCI-NEXT: addi a0, a0, 25 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: diff_shl_addi2: @@ -929,9 +925,10 @@ define i32 @diff_pow2_24_16(i32 signext %x) { ; ; RV32IXQCI-LABEL: diff_pow2_24_16: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: srai a0, a0, 31 -; RV32IXQCI-NEXT: andi a0, a0, -8 -; RV32IXQCI-NEXT: addi a0, a0, 24 +; RV32IXQCI-NEXT: li a2, 24 +; RV32IXQCI-NEXT: li a1, 16 +; RV32IXQCI-NEXT: qc.mvgei a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: diff_pow2_24_16: @@ -955,9 +952,10 @@ define i32 @diff_pow2_16_24(i32 signext %x) { ; ; RV32IXQCI-LABEL: diff_pow2_16_24: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: srli a0, a0, 28 -; RV32IXQCI-NEXT: andi a0, a0, 8 -; RV32IXQCI-NEXT: addi a0, a0, 16 +; RV32IXQCI-NEXT: li a2, 16 +; RV32IXQCI-NEXT: li a1, 24 +; RV32IXQCI-NEXT: qc.mvgei a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: diff_pow2_16_24: @@ -1008,14 +1006,14 @@ define i32 @zext_or_constant(i32 signext %x) { ; ; RV32IXQCI-LABEL: zext_or_constant: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: bgez a0, .LBB27_2 +; RV32IXQCI-NEXT: srli a2, a0, 31 +; RV32IXQCI-NEXT: lui a1, 140 +; RV32IXQCI-NEXT: addi a1, a1, 417 +; RV32IXQCI-NEXT: bltz a0, .LBB27_2 ; RV32IXQCI-NEXT: # %bb.1: -; RV32IXQCI-NEXT: lui a0, 140 -; RV32IXQCI-NEXT: addi a0, a0, 417 -; RV32IXQCI-NEXT: ret +; RV32IXQCI-NEXT: xori a1, a2, 1 ; RV32IXQCI-NEXT: .LBB27_2: -; RV32IXQCI-NEXT: srli a0, a0, 31 -; RV32IXQCI-NEXT: xori a0, a0, 1 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: zext_or_constant: @@ -1095,14 +1093,14 @@ define i32 @zext_or_constant2(i32 signext %x) { ; ; RV32IXQCI-LABEL: zext_or_constant2: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: bltz a0, .LBB28_2 +; RV32IXQCI-NEXT: srli a2, a0, 31 +; RV32IXQCI-NEXT: lui a1, 140 
+; RV32IXQCI-NEXT: addi a1, a1, 417 +; RV32IXQCI-NEXT: bgez a0, .LBB28_2 ; RV32IXQCI-NEXT: # %bb.1: -; RV32IXQCI-NEXT: lui a0, 140 -; RV32IXQCI-NEXT: addi a0, a0, 417 -; RV32IXQCI-NEXT: ret +; RV32IXQCI-NEXT: xori a1, a2, 1 ; RV32IXQCI-NEXT: .LBB28_2: -; RV32IXQCI-NEXT: srli a0, a0, 31 -; RV32IXQCI-NEXT: xori a0, a0, 1 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: zext_or_constant2: @@ -1183,14 +1181,14 @@ define i32 @sext_or_constant(i32 signext %x) { ; ; RV32IXQCI-LABEL: sext_or_constant: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: bgez a0, .LBB29_2 +; RV32IXQCI-NEXT: srli a2, a0, 31 +; RV32IXQCI-NEXT: lui a1, 140 +; RV32IXQCI-NEXT: addi a1, a1, 417 +; RV32IXQCI-NEXT: bltz a0, .LBB29_2 ; RV32IXQCI-NEXT: # %bb.1: -; RV32IXQCI-NEXT: lui a0, 140 -; RV32IXQCI-NEXT: addi a0, a0, 417 -; RV32IXQCI-NEXT: ret +; RV32IXQCI-NEXT: addi a1, a2, -1 ; RV32IXQCI-NEXT: .LBB29_2: -; RV32IXQCI-NEXT: srli a0, a0, 31 -; RV32IXQCI-NEXT: addi a0, a0, -1 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: sext_or_constant: @@ -1271,14 +1269,14 @@ define i32 @sext_or_constant2(i32 signext %x) { ; ; RV32IXQCI-LABEL: sext_or_constant2: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: bltz a0, .LBB30_2 +; RV32IXQCI-NEXT: srli a2, a0, 31 +; RV32IXQCI-NEXT: lui a1, 140 +; RV32IXQCI-NEXT: addi a1, a1, 417 +; RV32IXQCI-NEXT: bgez a0, .LBB30_2 ; RV32IXQCI-NEXT: # %bb.1: -; RV32IXQCI-NEXT: lui a0, 140 -; RV32IXQCI-NEXT: addi a0, a0, 417 -; RV32IXQCI-NEXT: ret +; RV32IXQCI-NEXT: addi a1, a2, -1 ; RV32IXQCI-NEXT: .LBB30_2: -; RV32IXQCI-NEXT: srli a0, a0, 31 -; RV32IXQCI-NEXT: addi a0, a0, -1 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: sext_or_constant2: @@ -1332,9 +1330,9 @@ define i32 @select_0_6(i32 signext %x) { ; ; RV32IXQCI-LABEL: select_0_6: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: srai a0, a0, 2 -; RV32IXQCI-NEXT: srli a0, a0, 30 -; RV32IXQCI-NEXT: slli a0, a0, 1 +; RV32IXQCI-NEXT: li a1, 6 +; RV32IXQCI-NEXT: qc.ligei a1, a0, 0, 0 +; 
RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_0_6: @@ -1358,9 +1356,9 @@ define i32 @select_6_0(i32 signext %x) { ; ; RV32IXQCI-LABEL: select_6_0: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: srli a0, a0, 31 -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: andi a0, a0, 6 +; RV32IXQCI-NEXT: li a1, 0 +; RV32IXQCI-NEXT: qc.ligei a1, a0, 0, 6 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_6_0: @@ -1383,8 +1381,9 @@ define i32 @select_0_394(i32 signext %x) { ; ; RV32IXQCI-LABEL: select_0_394: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: srai a0, a0, 31 -; RV32IXQCI-NEXT: andi a0, a0, 394 +; RV32IXQCI-NEXT: li a1, 394 +; RV32IXQCI-NEXT: qc.ligei a1, a0, 0, 0 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_0_394: @@ -1407,9 +1406,9 @@ define i32 @select_394_0(i32 signext %x) { ; ; RV32IXQCI-LABEL: select_394_0: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: srli a0, a0, 31 -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: andi a0, a0, 394 +; RV32IXQCI-NEXT: li a1, 394 +; RV32IXQCI-NEXT: qc.lilti a1, a0, 0, 0 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_394_0: diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll index 8273c65..1eb47e4c 100644 --- a/llvm/test/CodeGen/RISCV/select.ll +++ b/llvm/test/CodeGen/RISCV/select.ll @@ -4,7 +4,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+xventanacondops -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64IMXVTCONDOPS %s ; RUN: llc -mtriple=riscv32 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECKZICOND,RV32IMZICOND %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECKZICOND,RV64IMZICOND %s -; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \ +; RUN: llc -mtriple=riscv32 
-mattr=+m,+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCI define i16 @select_xor_1(i16 %A, i8 %cond) { @@ -44,10 +44,11 @@ define i16 @select_xor_1(i16 %A, i8 %cond) { ; ; RV32IXQCI-LABEL: select_xor_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a1, a1, 31 -; RV32IXQCI-NEXT: srai a1, a1, 31 -; RV32IXQCI-NEXT: andi a1, a1, 43 -; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: andi a1, a1, 1 +; RV32IXQCI-NEXT: beqz a1, .LBB0_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xori a0, a0, 43 +; RV32IXQCI-NEXT: .LBB0_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -102,10 +103,11 @@ define i16 @select_xor_1b(i16 %A, i8 %cond) { ; ; RV32IXQCI-LABEL: select_xor_1b: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a1, a1, 31 -; RV32IXQCI-NEXT: srai a1, a1, 31 -; RV32IXQCI-NEXT: andi a1, a1, 43 -; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: andi a1, a1, 1 +; RV32IXQCI-NEXT: beqz a1, .LBB1_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xori a0, a0, 43 +; RV32IXQCI-NEXT: .LBB1_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -148,10 +150,11 @@ define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) { ; ; RV32IXQCI-LABEL: select_xor_2: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a2, a2, 31 -; RV32IXQCI-NEXT: srai a2, a2, 31 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB2_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB2_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -196,10 +199,11 @@ define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) { ; ; RV32IXQCI-LABEL: select_xor_2b: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a2, a2, 31 -; RV32IXQCI-NEXT: srai a2, a2, 31 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: 
andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB3_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB3_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -221,9 +225,10 @@ define i16 @select_xor_3(i16 %A, i8 %cond) { ; RV32IXQCI-LABEL: select_xor_3: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a1, a1, 1 -; RV32IXQCI-NEXT: addi a1, a1, -1 -; RV32IXQCI-NEXT: andi a1, a1, 43 -; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: bnez a1, .LBB4_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xori a0, a0, 43 +; RV32IXQCI-NEXT: .LBB4_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -247,9 +252,10 @@ define i16 @select_xor_3b(i16 %A, i8 %cond) { ; RV32IXQCI-LABEL: select_xor_3b: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a1, a1, 1 -; RV32IXQCI-NEXT: addi a1, a1, -1 -; RV32IXQCI-NEXT: andi a1, a1, 43 -; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: bnez a1, .LBB5_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xori a0, a0, 43 +; RV32IXQCI-NEXT: .LBB5_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -293,9 +299,10 @@ define i32 @select_xor_4(i32 %A, i32 %B, i8 %cond) { ; RV32IXQCI-LABEL: select_xor_4: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a2, a2, 1 -; RV32IXQCI-NEXT: addi a2, a2, -1 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: bnez a2, .LBB6_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB6_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -341,9 +348,10 @@ define i32 @select_xor_4b(i32 %A, i32 %B, i8 %cond) { ; RV32IXQCI-LABEL: select_xor_4b: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a2, a2, 1 -; RV32IXQCI-NEXT: addi a2, a2, -1 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: bnez a2, .LBB7_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB7_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and 
i8 %cond, 1 @@ -382,9 +390,12 @@ define i32 @select_xor_5(i1 zeroext %cond, i32 %x) { ; ; RV32IXQCI-LABEL: select_xor_5: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a1 -; RV32IXQCI-NEXT: xori a0, a0, 128 +; RV32IXQCI-NEXT: li a2, 128 +; RV32IXQCI-NEXT: bnez a0, .LBB8_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: xori a2, a1, 128 +; RV32IXQCI-NEXT: .LBB8_2: +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret %add = xor i32 %x, 128 %sel = select i1 %cond, i32 128, i32 %add @@ -424,10 +435,11 @@ define i32 @select_or(i32 %A, i32 %B, i8 %cond) { ; ; RV32IXQCI-LABEL: select_or: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a2, a2, 31 -; RV32IXQCI-NEXT: srai a2, a2, 31 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB9_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB9_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -472,10 +484,11 @@ define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) { ; ; RV32IXQCI-LABEL: select_or_b: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a2, a2, 31 -; RV32IXQCI-NEXT: srai a2, a2, 31 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB10_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB10_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -518,10 +531,11 @@ define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) { ; ; RV32IXQCI-LABEL: select_or_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a2, a2, 31 -; RV32IXQCI-NEXT: srai a2, a2, 31 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB11_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB11_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i32 %cond, 1 @@ -566,10 +580,11 @@ define i32 @select_or_1b(i32 %A, i32 %B, 
i32 %cond) { ; ; RV32IXQCI-LABEL: select_or_1b: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a2, a2, 31 -; RV32IXQCI-NEXT: srai a2, a2, 31 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB12_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB12_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i32 %cond, 1 @@ -613,9 +628,10 @@ define i32 @select_or_2(i32 %A, i32 %B, i8 %cond) { ; RV32IXQCI-LABEL: select_or_2: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a2, a2, 1 -; RV32IXQCI-NEXT: addi a2, a2, -1 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: bnez a2, .LBB13_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB13_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -661,9 +677,10 @@ define i32 @select_or_2b(i32 %A, i32 %B, i8 %cond) { ; RV32IXQCI-LABEL: select_or_2b: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a2, a2, 1 -; RV32IXQCI-NEXT: addi a2, a2, -1 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: bnez a2, .LBB14_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB14_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -707,9 +724,10 @@ define i32 @select_or_3(i32 %A, i32 %B, i32 %cond) { ; RV32IXQCI-LABEL: select_or_3: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a2, a2, 1 -; RV32IXQCI-NEXT: addi a2, a2, -1 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: bnez a2, .LBB15_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB15_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i32 %cond, 1 @@ -755,9 +773,10 @@ define i32 @select_or_3b(i32 %A, i32 %B, i32 %cond) { ; RV32IXQCI-LABEL: select_or_3b: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a2, a2, 1 -; RV32IXQCI-NEXT: addi a2, a2, -1 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: bnez a2, .LBB16_2 
+; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB16_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i32 %cond, 1 @@ -796,9 +815,12 @@ define i32 @select_or_4(i1 zeroext %cond, i32 %x) { ; ; RV32IXQCI-LABEL: select_or_4: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a1 -; RV32IXQCI-NEXT: ori a0, a0, 128 +; RV32IXQCI-NEXT: li a2, 128 +; RV32IXQCI-NEXT: bnez a0, .LBB17_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: ori a2, a1, 128 +; RV32IXQCI-NEXT: .LBB17_2: +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret %add = or i32 %x, 128 %sel = select i1 %cond, i32 128, i32 %add @@ -840,9 +862,11 @@ define i32 @select_add_1(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_add_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: neg a0, a0 -; RV32IXQCI-NEXT: and a0, a0, a1 -; RV32IXQCI-NEXT: add a0, a0, a2 +; RV32IXQCI-NEXT: beqz a0, .LBB18_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: add a2, a2, a1 +; RV32IXQCI-NEXT: .LBB18_2: # %entry +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %c = add i32 %a, %b @@ -885,9 +909,11 @@ define i32 @select_add_2(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_add_2: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a2 -; RV32IXQCI-NEXT: add a0, a0, a1 +; RV32IXQCI-NEXT: bnez a0, .LBB19_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: add a1, a1, a2 +; RV32IXQCI-NEXT: .LBB19_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = add i32 %a, %b @@ -933,9 +959,11 @@ define i32 @select_add_3(i1 zeroext %cond, i32 %a) { ; ; RV32IXQCI-LABEL: select_add_3: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: andi a0, a0, 42 -; RV32IXQCI-NEXT: add a0, a0, a1 +; RV32IXQCI-NEXT: bnez a0, .LBB20_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: addi a1, a1, 42 +; RV32IXQCI-NEXT: .LBB20_2: # 
%entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = add i32 %a, 42 @@ -978,9 +1006,12 @@ define i32 @select_add_4(i1 zeroext %cond, i32 %x) { ; ; RV32IXQCI-LABEL: select_add_4: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a1 -; RV32IXQCI-NEXT: addi a0, a0, 128 +; RV32IXQCI-NEXT: li a2, 128 +; RV32IXQCI-NEXT: bnez a0, .LBB21_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: addi a2, a1, 128 +; RV32IXQCI-NEXT: .LBB21_2: +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret %add = add i32 %x, 128 %sel = select i1 %cond, i32 128, i32 %add @@ -1029,12 +1060,14 @@ define i64 @select_add_5(i1 zeroext %cond, i64 %x) { ; ; RV32IXQCI-LABEL: select_add_5: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: addi a3, a0, -1 -; RV32IXQCI-NEXT: and a1, a1, a3 -; RV32IXQCI-NEXT: addi a0, a1, 128 -; RV32IXQCI-NEXT: sltu a1, a0, a1 -; RV32IXQCI-NEXT: and a2, a2, a3 -; RV32IXQCI-NEXT: add a1, a1, a2 +; RV32IXQCI-NEXT: mv a3, a0 +; RV32IXQCI-NEXT: addi a4, a1, 128 +; RV32IXQCI-NEXT: sltu a0, a4, a1 +; RV32IXQCI-NEXT: add a2, a2, a0 +; RV32IXQCI-NEXT: li a0, 128 +; RV32IXQCI-NEXT: qc.mveqi a0, a3, 0, a4 +; RV32IXQCI-NEXT: qc.selectieqi a3, 0, a2, 0 +; RV32IXQCI-NEXT: mv a1, a3 ; RV32IXQCI-NEXT: ret %add = add i64 %x, 128 %sel = select i1 %cond, i64 128, i64 %add @@ -1093,14 +1126,15 @@ define i64 @select_add_6(i1 zeroext %cond, i64 %x) { ; ; RV32IXQCI-LABEL: select_add_6: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: addi a3, a0, -1 +; RV32IXQCI-NEXT: mv a3, a0 ; RV32IXQCI-NEXT: lui a0, 14 -; RV32IXQCI-NEXT: and a1, a1, a3 -; RV32IXQCI-NEXT: addi a0, a0, 1005 -; RV32IXQCI-NEXT: add a0, a0, a1 +; RV32IXQCI-NEXT: addi a4, a0, 1005 +; RV32IXQCI-NEXT: add a0, a1, a4 ; RV32IXQCI-NEXT: sltu a1, a0, a1 -; RV32IXQCI-NEXT: and a2, a2, a3 ; RV32IXQCI-NEXT: add a1, a1, a2 +; RV32IXQCI-NEXT: qc.mvnei a0, a3, 0, a4 +; RV32IXQCI-NEXT: qc.selectieqi a3, 0, a1, 0 +; RV32IXQCI-NEXT: mv a1, a3 ; RV32IXQCI-NEXT: ret %add = add i64 %x, 58349 %sel = select i1 %cond, 
i64 58349, i64 %add @@ -1152,9 +1186,11 @@ define i32 @select_sub_1(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_sub_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: sub a1, a1, a2 -; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 -; RV32IXQCI-NEXT: mv a0, a1 +; RV32IXQCI-NEXT: beqz a0, .LBB24_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: sub a2, a1, a2 +; RV32IXQCI-NEXT: .LBB24_2: # %entry +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %c = sub i32 %a, %b @@ -1197,9 +1233,11 @@ define i32 @select_sub_2(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_sub_2: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a2 -; RV32IXQCI-NEXT: sub a0, a1, a0 +; RV32IXQCI-NEXT: bnez a0, .LBB25_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: sub a1, a1, a2 +; RV32IXQCI-NEXT: .LBB25_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = sub i32 %a, %b @@ -1245,9 +1283,11 @@ define i32 @select_sub_3(i1 zeroext %cond, i32 %a) { ; ; RV32IXQCI-LABEL: select_sub_3: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: andi a0, a0, 42 -; RV32IXQCI-NEXT: sub a0, a1, a0 +; RV32IXQCI-NEXT: bnez a0, .LBB26_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: addi a1, a1, -42 +; RV32IXQCI-NEXT: .LBB26_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = sub i32 %a, 42 @@ -1301,10 +1341,12 @@ define i32 @select_sub_4(i1 zeroext %cond, i32 %x) { ; ; RV32IXQCI-LABEL: select_sub_4: ; RV32IXQCI: # %bb.0: +; RV32IXQCI-NEXT: li a2, 128 +; RV32IXQCI-NEXT: bnez a0, .LBB27_2 +; RV32IXQCI-NEXT: # %bb.1: ; RV32IXQCI-NEXT: addi a2, a1, -128 -; RV32IXQCI-NEXT: li a1, 128 -; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 -; RV32IXQCI-NEXT: mv a0, a1 +; RV32IXQCI-NEXT: .LBB27_2: +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret %add = sub i32 %x, 128 %sel = select i1 %cond, i32 128, i32 %add @@ -1347,9 +1389,11 @@ define i32 
@select_and_1(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_and_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: and a1, a1, a2 -; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 -; RV32IXQCI-NEXT: mv a0, a1 +; RV32IXQCI-NEXT: beqz a0, .LBB28_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: and a2, a2, a1 +; RV32IXQCI-NEXT: .LBB28_2: # %entry +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %c = and i32 %a, %b @@ -1392,9 +1436,11 @@ define i32 @select_and_2(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_and_2: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: and a2, a2, a1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: bnez a0, .LBB29_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: .LBB29_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = and i32 %a, %b @@ -1437,9 +1483,11 @@ define i32 @select_and_3(i1 zeroext %cond, i32 %a) { ; ; RV32IXQCI-LABEL: select_and_3: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: andi a2, a1, 42 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: bnez a0, .LBB30_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: andi a1, a1, 42 +; RV32IXQCI-NEXT: .LBB30_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = and i32 %a, 42 @@ -1626,9 +1674,11 @@ define i32 @select_udiv_3(i1 zeroext %cond, i32 %a) { ; RV32IXQCI-NEXT: lui a3, 199729 ; RV32IXQCI-NEXT: addi a3, a3, -975 ; RV32IXQCI-NEXT: mulhu a2, a2, a3 -; RV32IXQCI-NEXT: srli a2, a2, 2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: bnez a0, .LBB33_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: srli a1, a2, 2 +; RV32IXQCI-NEXT: .LBB33_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = udiv i32 %a, 42 @@ -1681,9 +1731,11 @@ define i32 @select_shl_1(i1 zeroext %cond, i32 %a, i32 %b) { ; ; 
RV32IXQCI-LABEL: select_shl_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: sll a1, a1, a2 -; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 -; RV32IXQCI-NEXT: mv a0, a1 +; RV32IXQCI-NEXT: beqz a0, .LBB34_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: sll a2, a1, a2 +; RV32IXQCI-NEXT: .LBB34_2: # %entry +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %c = shl i32 %a, %b @@ -1726,9 +1778,11 @@ define i32 @select_shl_2(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_shl_2: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a2 -; RV32IXQCI-NEXT: sll a0, a1, a0 +; RV32IXQCI-NEXT: bnez a0, .LBB35_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: sll a1, a1, a2 +; RV32IXQCI-NEXT: .LBB35_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = shl i32 %a, %b @@ -1797,9 +1851,11 @@ define i32 @select_ashr_1(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_ashr_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: sra a1, a1, a2 -; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 -; RV32IXQCI-NEXT: mv a0, a1 +; RV32IXQCI-NEXT: beqz a0, .LBB37_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: sra a2, a1, a2 +; RV32IXQCI-NEXT: .LBB37_2: # %entry +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %c = ashr i32 %a, %b @@ -1842,9 +1898,11 @@ define i32 @select_ashr_2(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_ashr_2: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a2 -; RV32IXQCI-NEXT: sra a0, a1, a0 +; RV32IXQCI-NEXT: bnez a0, .LBB38_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: sra a1, a1, a2 +; RV32IXQCI-NEXT: .LBB38_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = ashr i32 %a, %b @@ -1913,9 +1971,11 @@ define i32 @select_lshr_1(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_lshr_1: ; RV32IXQCI: # %bb.0: # %entry -; 
RV32IXQCI-NEXT: srl a1, a1, a2 -; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 -; RV32IXQCI-NEXT: mv a0, a1 +; RV32IXQCI-NEXT: beqz a0, .LBB40_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: srl a2, a1, a2 +; RV32IXQCI-NEXT: .LBB40_2: # %entry +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %c = lshr i32 %a, %b @@ -1958,9 +2018,11 @@ define i32 @select_lshr_2(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_lshr_2: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a2 -; RV32IXQCI-NEXT: srl a0, a1, a0 +; RV32IXQCI-NEXT: bnez a0, .LBB41_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: srl a1, a1, a2 +; RV32IXQCI-NEXT: .LBB41_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = lshr i32 %a, %b @@ -2304,11 +2366,13 @@ define i32 @select_cst3(i1 zeroext %cond) { ; ; RV32IXQCI-LABEL: select_cst3: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: lui a1, 7 -; RV32IXQCI-NEXT: lui a2, 5 -; RV32IXQCI-NEXT: addi a3, a1, 1328 -; RV32IXQCI-NEXT: addi a1, a2, -480 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a3 +; RV32IXQCI-NEXT: lui a2, 7 +; RV32IXQCI-NEXT: lui a1, 5 +; RV32IXQCI-NEXT: addi a1, a1, -480 +; RV32IXQCI-NEXT: beqz a0, .LBB51_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: addi a1, a2, 1328 +; RV32IXQCI-NEXT: .LBB51_2: ; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %ret = select i1 %cond, i32 30000, i32 20000 @@ -2370,10 +2434,12 @@ define i32 @select_cst5(i1 zeroext %cond) { ; ; RV32IXQCI-LABEL: select_cst5: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: lui a1, 1 -; RV32IXQCI-NEXT: addi a2, a1, -2047 +; RV32IXQCI-NEXT: lui a2, 1 ; RV32IXQCI-NEXT: li a1, 2047 -; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: bnez a0, .LBB53_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: addi a1, a2, -2047 +; RV32IXQCI-NEXT: .LBB53_2: ; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %ret = select i1 %cond, i32 2047, i32 2049 @@ -2417,10 +2483,12 @@ define i32 
@select_cst5_invert(i1 zeroext %cond) { ; ; RV32IXQCI-LABEL: select_cst5_invert: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: lui a1, 1 -; RV32IXQCI-NEXT: addi a2, a1, -2047 +; RV32IXQCI-NEXT: lui a2, 1 ; RV32IXQCI-NEXT: li a1, 2047 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a2 +; RV32IXQCI-NEXT: beqz a0, .LBB54_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: addi a1, a2, -2047 +; RV32IXQCI-NEXT: .LBB54_2: ; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %ret = select i1 %cond, i32 2049, i32 2047 diff --git a/llvm/test/CodeGen/RISCV/xqcicli.ll b/llvm/test/CodeGen/RISCV/xqcicli.ll index 8b97616..8d4caa1 100644 --- a/llvm/test/CodeGen/RISCV/xqcicli.ll +++ b/llvm/test/CodeGen/RISCV/xqcicli.ll @@ -4,7 +4,7 @@ ; RUN: | FileCheck %s --check-prefixes=RV32I ; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicli -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCICLI -; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCI define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) { diff --git a/llvm/test/CodeGen/RISCV/xqcicm.ll b/llvm/test/CodeGen/RISCV/xqcicm.ll index fb48301..8e93496 100644 --- a/llvm/test/CodeGen/RISCV/xqcicm.ll +++ b/llvm/test/CodeGen/RISCV/xqcicm.ll @@ -6,7 +6,7 @@ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCICM ; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCICM -; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \ +; RUN: llc -mtriple=riscv32 
-mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCI define i32 @select_example(i32 %cond, i32 %x, i32 %y) { diff --git a/llvm/test/CodeGen/RISCV/xqcics.ll b/llvm/test/CodeGen/RISCV/xqcics.ll index 60fc98c..c0839c9 100644 --- a/llvm/test/CodeGen/RISCV/xqcics.ll +++ b/llvm/test/CodeGen/RISCV/xqcics.ll @@ -6,7 +6,7 @@ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCICS ; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcics,+experimental-xqcicm -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCICM -; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCI define i32 @select_cc_example_eq_s1(i32 %a, i32 %b, i32 %x, i32 %y) { diff --git a/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll b/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll index 41d2c02..5a79659 100644 --- a/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll +++ b/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll @@ -348,38 +348,35 @@ entry: define <4 x float> @vector_add_f32(<4 x float> %lhs, <4 x float> %rhs) { ; CHECK-MVE-LABEL: vector_add_f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: .save {r4, r5, r6, r7, lr} -; CHECK-MVE-NEXT: push {r4, r5, r6, r7, lr} -; CHECK-MVE-NEXT: .pad #4 -; CHECK-MVE-NEXT: sub sp, #4 +; CHECK-MVE-NEXT: .save {r4, r5, r6, r7, r8, lr} +; CHECK-MVE-NEXT: push.w {r4, r5, r6, r7, r8, lr} ; CHECK-MVE-NEXT: .vsave {d8, d9} ; CHECK-MVE-NEXT: vpush {d8, d9} -; CHECK-MVE-NEXT: mov r4, r0 +; CHECK-MVE-NEXT: mov r8, r0 ; CHECK-MVE-NEXT: add r0, sp, #40 ; CHECK-MVE-NEXT: vldrw.u32 q4, [r0] -; CHECK-MVE-NEXT: mov r6, r1 
+; CHECK-MVE-NEXT: mov r7, r1 ; CHECK-MVE-NEXT: mov r0, r3 -; CHECK-MVE-NEXT: mov r5, r2 -; CHECK-MVE-NEXT: vmov r7, r1, d9 +; CHECK-MVE-NEXT: mov r6, r2 +; CHECK-MVE-NEXT: vmov r4, r1, d9 ; CHECK-MVE-NEXT: bl __aeabi_fadd -; CHECK-MVE-NEXT: vmov s19, r0 -; CHECK-MVE-NEXT: mov r0, r5 -; CHECK-MVE-NEXT: mov r1, r7 -; CHECK-MVE-NEXT: bl __aeabi_fadd -; CHECK-MVE-NEXT: vmov r5, r1, d8 -; CHECK-MVE-NEXT: vmov s18, r0 +; CHECK-MVE-NEXT: mov r5, r0 ; CHECK-MVE-NEXT: mov r0, r6 +; CHECK-MVE-NEXT: mov r1, r4 ; CHECK-MVE-NEXT: bl __aeabi_fadd -; CHECK-MVE-NEXT: vmov s17, r0 -; CHECK-MVE-NEXT: mov r0, r4 -; CHECK-MVE-NEXT: mov r1, r5 +; CHECK-MVE-NEXT: vmov r6, r1, d8 +; CHECK-MVE-NEXT: mov r4, r0 +; CHECK-MVE-NEXT: mov r0, r7 ; CHECK-MVE-NEXT: bl __aeabi_fadd -; CHECK-MVE-NEXT: vmov s16, r0 -; CHECK-MVE-NEXT: vmov r2, r3, d9 -; CHECK-MVE-NEXT: vmov r0, r1, d8 +; CHECK-MVE-NEXT: mov r7, r0 +; CHECK-MVE-NEXT: mov r0, r8 +; CHECK-MVE-NEXT: mov r1, r6 +; CHECK-MVE-NEXT: bl __aeabi_fadd +; CHECK-MVE-NEXT: mov r1, r7 +; CHECK-MVE-NEXT: mov r2, r4 +; CHECK-MVE-NEXT: mov r3, r5 ; CHECK-MVE-NEXT: vpop {d8, d9} -; CHECK-MVE-NEXT: add sp, #4 -; CHECK-MVE-NEXT: pop {r4, r5, r6, r7, pc} +; CHECK-MVE-NEXT: pop.w {r4, r5, r6, r7, r8, pc} ; ; CHECK-BE-LABEL: vector_add_f32: ; CHECK-BE: @ %bb.0: @ %entry diff --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll index 4dd9173..93b5e3f 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll @@ -33,53 +33,29 @@ entry: } define void @vld3_v4i32(ptr %src, ptr %dst) { -; CHECK-LV-LABEL: vld3_v4i32: -; CHECK-LV: @ %bb.0: @ %entry -; CHECK-LV-NEXT: .vsave {d8, d9} -; CHECK-LV-NEXT: vpush {d8, d9} -; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #16] -; CHECK-LV-NEXT: vldrw.u32 q1, [r0] -; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #32] -; CHECK-LV-NEXT: vmov.f32 s10, s2 -; CHECK-LV-NEXT: vmov.f32 s13, s0 -; CHECK-LV-NEXT: vmov.f32 s14, s3 -; CHECK-LV-NEXT: vmov.f32 s8, s4 -; CHECK-LV-NEXT: vmov.f32 s9, 
s7 -; CHECK-LV-NEXT: vmov.f32 s12, s5 -; CHECK-LV-NEXT: vmov.f32 s15, s18 -; CHECK-LV-NEXT: vmov.f32 s11, s17 -; CHECK-LV-NEXT: vadd.i32 q2, q2, q3 -; CHECK-LV-NEXT: vmov.f32 s0, s6 -; CHECK-LV-NEXT: vmov.f32 s2, s16 -; CHECK-LV-NEXT: vmov.f32 s3, s19 -; CHECK-LV-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LV-NEXT: vstrw.32 q0, [r1] -; CHECK-LV-NEXT: vpop {d8, d9} -; CHECK-LV-NEXT: bx lr -; -; CHECK-LIS-LABEL: vld3_v4i32: -; CHECK-LIS: @ %bb.0: @ %entry -; CHECK-LIS-NEXT: .vsave {d8, d9} -; CHECK-LIS-NEXT: vpush {d8, d9} -; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #16] -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0] -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-LIS-NEXT: vmov.f32 s10, s2 -; CHECK-LIS-NEXT: vmov.f32 s17, s0 -; CHECK-LIS-NEXT: vmov.f32 s18, s3 -; CHECK-LIS-NEXT: vmov.f32 s8, s4 -; CHECK-LIS-NEXT: vmov.f32 s9, s7 -; CHECK-LIS-NEXT: vmov.f32 s16, s5 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s11, s13 -; CHECK-LIS-NEXT: vadd.i32 q2, q2, q4 -; CHECK-LIS-NEXT: vmov.f32 s0, s6 -; CHECK-LIS-NEXT: vmov.f32 s2, s12 -; CHECK-LIS-NEXT: vmov.f32 s3, s15 -; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LIS-NEXT: vstrw.32 q0, [r1] -; CHECK-LIS-NEXT: vpop {d8, d9} -; CHECK-LIS-NEXT: bx lr +; CHECK-LABEL: vld3_v4i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vldrw.u32 q0, [r0, #16] +; CHECK-NEXT: vldrw.u32 q1, [r0] +; CHECK-NEXT: vldrw.u32 q4, [r0, #32] +; CHECK-NEXT: vmov.f32 s10, s2 +; CHECK-NEXT: vmov.f32 s13, s0 +; CHECK-NEXT: vmov.f32 s14, s3 +; CHECK-NEXT: vmov.f32 s8, s4 +; CHECK-NEXT: vmov.f32 s9, s7 +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmov.f32 s15, s18 +; CHECK-NEXT: vmov.f32 s11, s17 +; CHECK-NEXT: vadd.i32 q2, q2, q3 +; CHECK-NEXT: vmov.f32 s0, s6 +; CHECK-NEXT: vmov.f32 s2, s16 +; CHECK-NEXT: vmov.f32 s3, s19 +; CHECK-NEXT: vadd.i32 q0, q2, q0 +; CHECK-NEXT: vstrw.32 q0, [r1] +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: bx lr entry: %l1 = load <12 x i32>, ptr %src, align 4 @@ 
-93,87 +69,46 @@ entry: } define void @vld3_v8i32(ptr %src, ptr %dst) { -; CHECK-LV-LABEL: vld3_v8i32: -; CHECK-LV: @ %bb.0: @ %entry -; CHECK-LV-NEXT: .vsave {d8, d9, d10, d11} -; CHECK-LV-NEXT: vpush {d8, d9, d10, d11} -; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #64] -; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #48] -; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #80] -; CHECK-LV-NEXT: vmov.f32 s10, s2 -; CHECK-LV-NEXT: vmov.f32 s13, s0 -; CHECK-LV-NEXT: vmov.f32 s14, s3 -; CHECK-LV-NEXT: vmov.f32 s8, s4 -; CHECK-LV-NEXT: vmov.f32 s9, s7 -; CHECK-LV-NEXT: vmov.f32 s12, s5 -; CHECK-LV-NEXT: vmov.f32 s15, s18 -; CHECK-LV-NEXT: vmov.f32 s11, s17 -; CHECK-LV-NEXT: vadd.i32 q2, q2, q3 -; CHECK-LV-NEXT: vmov.f32 s0, s6 -; CHECK-LV-NEXT: vmov.f32 s2, s16 -; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-LV-NEXT: vmov.f32 s3, s19 -; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-LV-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LV-NEXT: vldrw.u32 q2, [r0] -; CHECK-LV-NEXT: vmov.f32 s17, s4 -; CHECK-LV-NEXT: vstrw.32 q0, [r1, #16] -; CHECK-LV-NEXT: vmov.f32 s18, s7 -; CHECK-LV-NEXT: vmov.f32 s22, s6 -; CHECK-LV-NEXT: vmov.f32 s16, s9 -; CHECK-LV-NEXT: vmov.f32 s19, s14 -; CHECK-LV-NEXT: vmov.f32 s20, s8 -; CHECK-LV-NEXT: vmov.f32 s21, s11 -; CHECK-LV-NEXT: vmov.f32 s23, s13 -; CHECK-LV-NEXT: vadd.i32 q4, q5, q4 -; CHECK-LV-NEXT: vmov.f32 s4, s10 -; CHECK-LV-NEXT: vmov.f32 s6, s12 -; CHECK-LV-NEXT: vmov.f32 s7, s15 -; CHECK-LV-NEXT: vadd.i32 q1, q4, q1 -; CHECK-LV-NEXT: vstrw.32 q1, [r1] -; CHECK-LV-NEXT: vpop {d8, d9, d10, d11} -; CHECK-LV-NEXT: bx lr -; -; CHECK-LIS-LABEL: vld3_v8i32: -; CHECK-LIS: @ %bb.0: @ %entry -; CHECK-LIS-NEXT: .vsave {d8, d9, d10, d11} -; CHECK-LIS-NEXT: vpush {d8, d9, d10, d11} -; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #64] -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #48] -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #80] -; CHECK-LIS-NEXT: vmov.f32 s10, s2 -; CHECK-LIS-NEXT: vmov.f32 s17, s0 -; CHECK-LIS-NEXT: vmov.f32 s18, s3 -; CHECK-LIS-NEXT: vmov.f32 s8, s4 -; CHECK-LIS-NEXT: vmov.f32 s9, s7 
-; CHECK-LIS-NEXT: vmov.f32 s16, s5 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s11, s13 -; CHECK-LIS-NEXT: vmov.f32 s0, s6 -; CHECK-LIS-NEXT: vadd.i32 q2, q2, q4 -; CHECK-LIS-NEXT: vmov.f32 s2, s12 -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-LIS-NEXT: vmov.f32 s3, s15 -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LIS-NEXT: vldrw.u32 q2, [r0] -; CHECK-LIS-NEXT: vmov.f32 s17, s4 -; CHECK-LIS-NEXT: vstrw.32 q0, [r1, #16] -; CHECK-LIS-NEXT: vmov.f32 s18, s7 -; CHECK-LIS-NEXT: vmov.f32 s22, s6 -; CHECK-LIS-NEXT: vmov.f32 s16, s9 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s20, s8 -; CHECK-LIS-NEXT: vmov.f32 s21, s11 -; CHECK-LIS-NEXT: vmov.f32 s23, s13 -; CHECK-LIS-NEXT: vadd.i32 q4, q5, q4 -; CHECK-LIS-NEXT: vmov.f32 s4, s10 -; CHECK-LIS-NEXT: vmov.f32 s6, s12 -; CHECK-LIS-NEXT: vmov.f32 s7, s15 -; CHECK-LIS-NEXT: vadd.i32 q1, q4, q1 -; CHECK-LIS-NEXT: vstrw.32 q1, [r1] -; CHECK-LIS-NEXT: vpop {d8, d9, d10, d11} -; CHECK-LIS-NEXT: bx lr +; CHECK-LABEL: vld3_v8i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9, d10, d11} +; CHECK-NEXT: vpush {d8, d9, d10, d11} +; CHECK-NEXT: vldrw.u32 q0, [r0, #64] +; CHECK-NEXT: vldrw.u32 q1, [r0, #48] +; CHECK-NEXT: vldrw.u32 q4, [r0, #80] +; CHECK-NEXT: vmov.f32 s10, s2 +; CHECK-NEXT: vmov.f32 s13, s0 +; CHECK-NEXT: vmov.f32 s14, s3 +; CHECK-NEXT: vmov.f32 s8, s4 +; CHECK-NEXT: vmov.f32 s9, s7 +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmov.f32 s15, s18 +; CHECK-NEXT: vmov.f32 s11, s17 +; CHECK-NEXT: vadd.i32 q2, q2, q3 +; CHECK-NEXT: vmov.f32 s0, s6 +; CHECK-NEXT: vmov.f32 s2, s16 +; CHECK-NEXT: vldrw.u32 q1, [r0, #16] +; CHECK-NEXT: vmov.f32 s3, s19 +; CHECK-NEXT: vldrw.u32 q3, [r0, #32] +; CHECK-NEXT: vadd.i32 q0, q2, q0 +; CHECK-NEXT: vldrw.u32 q2, [r0] +; CHECK-NEXT: vmov.f32 s17, s4 +; CHECK-NEXT: vstrw.32 q0, [r1, #16] +; CHECK-NEXT: vmov.f32 s18, s7 +; CHECK-NEXT: vmov.f32 s22, s6 +; CHECK-NEXT: vmov.f32 s16, s9 +; 
CHECK-NEXT: vmov.f32 s19, s14 +; CHECK-NEXT: vmov.f32 s20, s8 +; CHECK-NEXT: vmov.f32 s21, s11 +; CHECK-NEXT: vmov.f32 s23, s13 +; CHECK-NEXT: vadd.i32 q4, q5, q4 +; CHECK-NEXT: vmov.f32 s4, s10 +; CHECK-NEXT: vmov.f32 s6, s12 +; CHECK-NEXT: vmov.f32 s7, s15 +; CHECK-NEXT: vadd.i32 q1, q4, q1 +; CHECK-NEXT: vstrw.32 q1, [r1] +; CHECK-NEXT: vpop {d8, d9, d10, d11} +; CHECK-NEXT: bx lr entry: %l1 = load <24 x i32>, ptr %src, align 4 @@ -187,155 +122,80 @@ entry: } define void @vld3_v16i32(ptr %src, ptr %dst) { -; CHECK-LV-LABEL: vld3_v16i32: -; CHECK-LV: @ %bb.0: @ %entry -; CHECK-LV-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LV-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #64] -; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #48] -; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #80] -; CHECK-LV-NEXT: vldrw.u32 q6, [r0, #176] -; CHECK-LV-NEXT: vmov.f32 s10, s2 -; CHECK-LV-NEXT: vmov.f32 s13, s0 -; CHECK-LV-NEXT: vmov.f32 s14, s3 -; CHECK-LV-NEXT: vmov.f32 s8, s4 -; CHECK-LV-NEXT: vmov.f32 s9, s7 -; CHECK-LV-NEXT: vmov.f32 s12, s5 -; CHECK-LV-NEXT: vmov.f32 s15, s18 -; CHECK-LV-NEXT: vmov.f32 s11, s17 -; CHECK-LV-NEXT: vadd.i32 q2, q2, q3 -; CHECK-LV-NEXT: vmov.f32 s0, s6 -; CHECK-LV-NEXT: vmov.f32 s2, s16 -; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-LV-NEXT: vmov.f32 s3, s19 -; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-LV-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LV-NEXT: vldrw.u32 q2, [r0] -; CHECK-LV-NEXT: vmov.f32 s17, s4 -; CHECK-LV-NEXT: vmov.f32 s18, s7 -; CHECK-LV-NEXT: vmov.f32 s22, s6 -; CHECK-LV-NEXT: vmov.f32 s16, s9 -; CHECK-LV-NEXT: vmov.f32 s19, s14 -; CHECK-LV-NEXT: vmov.f32 s20, s8 -; CHECK-LV-NEXT: vmov.f32 s21, s11 -; CHECK-LV-NEXT: vmov.f32 s23, s13 -; CHECK-LV-NEXT: vmov.f32 s4, s10 -; CHECK-LV-NEXT: vldrw.u32 q2, [r0, #160] -; CHECK-LV-NEXT: vmov.f32 s6, s12 -; CHECK-LV-NEXT: vadd.i32 q4, q5, q4 -; CHECK-LV-NEXT: vmov.f32 s7, s15 -; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #144] -; CHECK-LV-NEXT: vadd.i32 q1, 
q4, q1 -; CHECK-LV-NEXT: vmov.f32 s18, s10 -; CHECK-LV-NEXT: vmov.f32 s21, s8 -; CHECK-LV-NEXT: vmov.f32 s22, s11 -; CHECK-LV-NEXT: vmov.f32 s16, s12 -; CHECK-LV-NEXT: vmov.f32 s17, s15 -; CHECK-LV-NEXT: vmov.f32 s20, s13 -; CHECK-LV-NEXT: vmov.f32 s23, s26 -; CHECK-LV-NEXT: vmov.f32 s19, s25 -; CHECK-LV-NEXT: vadd.i32 q4, q4, q5 -; CHECK-LV-NEXT: vmov.f32 s8, s14 -; CHECK-LV-NEXT: vmov.f32 s10, s24 -; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #112] -; CHECK-LV-NEXT: vmov.f32 s11, s27 -; CHECK-LV-NEXT: vldrw.u32 q5, [r0, #128] -; CHECK-LV-NEXT: vadd.i32 q2, q4, q2 -; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #96] -; CHECK-LV-NEXT: vmov.f32 s25, s12 -; CHECK-LV-NEXT: vstrw.32 q2, [r1, #48] -; CHECK-LV-NEXT: vmov.f32 s26, s15 -; CHECK-LV-NEXT: vstrw.32 q0, [r1, #16] -; CHECK-LV-NEXT: vmov.f32 s30, s14 -; CHECK-LV-NEXT: vstrw.32 q1, [r1] -; CHECK-LV-NEXT: vmov.f32 s24, s17 -; CHECK-LV-NEXT: vmov.f32 s27, s22 -; CHECK-LV-NEXT: vmov.f32 s28, s16 -; CHECK-LV-NEXT: vmov.f32 s29, s19 -; CHECK-LV-NEXT: vmov.f32 s31, s21 -; CHECK-LV-NEXT: vadd.i32 q6, q7, q6 -; CHECK-LV-NEXT: vmov.f32 s12, s18 -; CHECK-LV-NEXT: vmov.f32 s14, s20 -; CHECK-LV-NEXT: vmov.f32 s15, s23 -; CHECK-LV-NEXT: vadd.i32 q3, q6, q3 -; CHECK-LV-NEXT: vstrw.32 q3, [r1, #32] -; CHECK-LV-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LV-NEXT: bx lr -; -; CHECK-LIS-LABEL: vld3_v16i32: -; CHECK-LIS: @ %bb.0: @ %entry -; CHECK-LIS-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LIS-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #64] -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #48] -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #80] -; CHECK-LIS-NEXT: vmov.f32 s10, s2 -; CHECK-LIS-NEXT: vmov.f32 s17, s0 -; CHECK-LIS-NEXT: vmov.f32 s18, s3 -; CHECK-LIS-NEXT: vmov.f32 s8, s4 -; CHECK-LIS-NEXT: vmov.f32 s9, s7 -; CHECK-LIS-NEXT: vmov.f32 s16, s5 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s11, s13 -; CHECK-LIS-NEXT: vmov.f32 s0, s6 -; CHECK-LIS-NEXT: vadd.i32 
q2, q2, q4 -; CHECK-LIS-NEXT: vmov.f32 s2, s12 -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-LIS-NEXT: vmov.f32 s3, s15 -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LIS-NEXT: vldrw.u32 q2, [r0] -; CHECK-LIS-NEXT: vmov.f32 s17, s4 -; CHECK-LIS-NEXT: vmov.f32 s18, s7 -; CHECK-LIS-NEXT: vmov.f32 s22, s6 -; CHECK-LIS-NEXT: vmov.f32 s16, s9 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s20, s8 -; CHECK-LIS-NEXT: vmov.f32 s21, s11 -; CHECK-LIS-NEXT: vmov.f32 s23, s13 -; CHECK-LIS-NEXT: vadd.i32 q4, q5, q4 -; CHECK-LIS-NEXT: vmov.f32 s4, s10 -; CHECK-LIS-NEXT: vldrw.u32 q2, [r0, #160] -; CHECK-LIS-NEXT: vldrw.u32 q5, [r0, #176] -; CHECK-LIS-NEXT: vmov.f32 s6, s12 -; CHECK-LIS-NEXT: vmov.f32 s7, s15 -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #144] -; CHECK-LIS-NEXT: vadd.i32 q1, q4, q1 -; CHECK-LIS-NEXT: vmov.f32 s18, s10 -; CHECK-LIS-NEXT: vmov.f32 s25, s8 -; CHECK-LIS-NEXT: vmov.f32 s26, s11 -; CHECK-LIS-NEXT: vmov.f32 s16, s12 -; CHECK-LIS-NEXT: vmov.f32 s17, s15 -; CHECK-LIS-NEXT: vmov.f32 s24, s13 -; CHECK-LIS-NEXT: vmov.f32 s27, s22 -; CHECK-LIS-NEXT: vmov.f32 s19, s21 -; CHECK-LIS-NEXT: vmov.f32 s8, s14 -; CHECK-LIS-NEXT: vadd.i32 q4, q4, q6 -; CHECK-LIS-NEXT: vmov.f32 s10, s20 -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #112] -; CHECK-LIS-NEXT: vmov.f32 s11, s23 -; CHECK-LIS-NEXT: vldrw.u32 q5, [r0, #128] -; CHECK-LIS-NEXT: vadd.i32 q2, q4, q2 -; CHECK-LIS-NEXT: vldrw.u32 q4, [r0, #96] -; CHECK-LIS-NEXT: vmov.f32 s25, s12 -; CHECK-LIS-NEXT: vstrw.32 q2, [r1, #48] -; CHECK-LIS-NEXT: vmov.f32 s26, s15 -; CHECK-LIS-NEXT: vstrw.32 q0, [r1, #16] -; CHECK-LIS-NEXT: vmov.f32 s30, s14 -; CHECK-LIS-NEXT: vstrw.32 q1, [r1] -; CHECK-LIS-NEXT: vmov.f32 s24, s17 -; CHECK-LIS-NEXT: vmov.f32 s27, s22 -; CHECK-LIS-NEXT: vmov.f32 s28, s16 -; CHECK-LIS-NEXT: vmov.f32 s29, s19 -; CHECK-LIS-NEXT: vmov.f32 s31, s21 -; CHECK-LIS-NEXT: vadd.i32 q6, q7, q6 -; CHECK-LIS-NEXT: vmov.f32 s12, s18 -; CHECK-LIS-NEXT: vmov.f32 s14, s20 
-; CHECK-LIS-NEXT: vmov.f32 s15, s23 -; CHECK-LIS-NEXT: vadd.i32 q3, q6, q3 -; CHECK-LIS-NEXT: vstrw.32 q3, [r1, #32] -; CHECK-LIS-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LIS-NEXT: bx lr +; CHECK-LABEL: vld3_v16i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: vldrw.u32 q0, [r0, #64] +; CHECK-NEXT: vldrw.u32 q1, [r0, #48] +; CHECK-NEXT: vldrw.u32 q4, [r0, #80] +; CHECK-NEXT: vldrw.u32 q6, [r0, #176] +; CHECK-NEXT: vmov.f32 s10, s2 +; CHECK-NEXT: vmov.f32 s13, s0 +; CHECK-NEXT: vmov.f32 s14, s3 +; CHECK-NEXT: vmov.f32 s8, s4 +; CHECK-NEXT: vmov.f32 s9, s7 +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmov.f32 s15, s18 +; CHECK-NEXT: vmov.f32 s11, s17 +; CHECK-NEXT: vadd.i32 q2, q2, q3 +; CHECK-NEXT: vmov.f32 s0, s6 +; CHECK-NEXT: vmov.f32 s2, s16 +; CHECK-NEXT: vldrw.u32 q1, [r0, #16] +; CHECK-NEXT: vmov.f32 s3, s19 +; CHECK-NEXT: vldrw.u32 q3, [r0, #32] +; CHECK-NEXT: vadd.i32 q0, q2, q0 +; CHECK-NEXT: vldrw.u32 q2, [r0] +; CHECK-NEXT: vmov.f32 s17, s4 +; CHECK-NEXT: vmov.f32 s18, s7 +; CHECK-NEXT: vmov.f32 s22, s6 +; CHECK-NEXT: vmov.f32 s16, s9 +; CHECK-NEXT: vmov.f32 s19, s14 +; CHECK-NEXT: vmov.f32 s20, s8 +; CHECK-NEXT: vmov.f32 s21, s11 +; CHECK-NEXT: vmov.f32 s23, s13 +; CHECK-NEXT: vmov.f32 s4, s10 +; CHECK-NEXT: vldrw.u32 q2, [r0, #160] +; CHECK-NEXT: vmov.f32 s6, s12 +; CHECK-NEXT: vadd.i32 q4, q5, q4 +; CHECK-NEXT: vmov.f32 s7, s15 +; CHECK-NEXT: vldrw.u32 q3, [r0, #144] +; CHECK-NEXT: vadd.i32 q1, q4, q1 +; CHECK-NEXT: vmov.f32 s18, s10 +; CHECK-NEXT: vmov.f32 s21, s8 +; CHECK-NEXT: vmov.f32 s22, s11 +; CHECK-NEXT: vmov.f32 s16, s12 +; CHECK-NEXT: vmov.f32 s17, s15 +; CHECK-NEXT: vmov.f32 s20, s13 +; CHECK-NEXT: vmov.f32 s23, s26 +; CHECK-NEXT: vmov.f32 s19, s25 +; CHECK-NEXT: vadd.i32 q4, q4, q5 +; CHECK-NEXT: vmov.f32 s8, s14 +; CHECK-NEXT: vmov.f32 s10, s24 +; CHECK-NEXT: vldrw.u32 q3, [r0, #112] +; CHECK-NEXT: 
vmov.f32 s11, s27 +; CHECK-NEXT: vldrw.u32 q5, [r0, #128] +; CHECK-NEXT: vadd.i32 q2, q4, q2 +; CHECK-NEXT: vldrw.u32 q4, [r0, #96] +; CHECK-NEXT: vmov.f32 s25, s12 +; CHECK-NEXT: vstrw.32 q2, [r1, #48] +; CHECK-NEXT: vmov.f32 s26, s15 +; CHECK-NEXT: vstrw.32 q0, [r1, #16] +; CHECK-NEXT: vmov.f32 s30, s14 +; CHECK-NEXT: vstrw.32 q1, [r1] +; CHECK-NEXT: vmov.f32 s24, s17 +; CHECK-NEXT: vmov.f32 s27, s22 +; CHECK-NEXT: vmov.f32 s28, s16 +; CHECK-NEXT: vmov.f32 s29, s19 +; CHECK-NEXT: vmov.f32 s31, s21 +; CHECK-NEXT: vadd.i32 q6, q7, q6 +; CHECK-NEXT: vmov.f32 s12, s18 +; CHECK-NEXT: vmov.f32 s14, s20 +; CHECK-NEXT: vmov.f32 s15, s23 +; CHECK-NEXT: vadd.i32 q3, q6, q3 +; CHECK-NEXT: vstrw.32 q3, [r1, #32] +; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: bx lr entry: %l1 = load <48 x i32>, ptr %src, align 4 diff --git a/llvm/test/CodeGen/X86/combine-pack.ll b/llvm/test/CodeGen/X86/combine-pack.ll new file mode 100644 index 0000000..2f5454d --- /dev/null +++ b/llvm/test/CodeGen/X86/combine-pack.ll @@ -0,0 +1,49 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX + +declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) + +define <8 x i16> @combine_packss_v4i32_signsplat(<4 x i32> %a0, <4 x i32> %a1) { +; SSE-LABEL: combine_packss_v4i32_signsplat: +; SSE: # %bb.0: +; SSE-NEXT: pcmpgtd %xmm1, %xmm0 +; SSE-NEXT: pcmpeqd %xmm1, %xmm1 +; SSE-NEXT: packssdw %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: combine_packss_v4i32_signsplat: +; AVX: # %bb.0: +; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %cmp = icmp sgt <4 x 
i32> %a0, %a1 + %ext = sext <4 x i1> %cmp to <4 x i32> + %pack = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %ext, <4 x i32> splat (i32 -1)) + %signsplat = ashr <8 x i16> %pack, splat (i16 15) + ret <8 x i16> %signsplat +} + +define <8 x i16> @combine_packss_v4i32_freeze_signsplat(<4 x i32> %a0, <4 x i32> %a1) { +; SSE-LABEL: combine_packss_v4i32_freeze_signsplat: +; SSE: # %bb.0: +; SSE-NEXT: pcmpgtd %xmm1, %xmm0 +; SSE-NEXT: pcmpeqd %xmm1, %xmm1 +; SSE-NEXT: packssdw %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: combine_packss_v4i32_freeze_signsplat: +; AVX: # %bb.0: +; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %cmp = icmp sgt <4 x i32> %a0, %a1 + %ext = sext <4 x i1> %cmp to <4 x i32> + %pack = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %ext, <4 x i32> splat (i32 -1)) + %freeze = freeze <8 x i16> %pack + %signsplat = ashr <8 x i16> %freeze, splat (i16 15) + ret <8 x i16> %signsplat +} diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s index a313741..40fcd6f 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s @@ -73,98 +73,98 @@ v_tanh_f32 v5, src_scc v_tanh_f32 v255, 0xaf123456 // GFX1250: v_tanh_f32_e32 v255, 0xaf123456 ; encoding: [0xff,0x3c,0xfe,0x7f,0x56,0x34,0x12,0xaf] -v_tanh_f16 v5, v1 -// GFX1250: v_tanh_f16_e32 v5, v1 ; encoding: [0x01,0x3f,0x0a,0x7e] +v_tanh_f16 v5.l, v1.l +// GFX1250: v_tanh_f16_e32 v5.l, v1.l ; encoding: [0x01,0x3f,0x0a,0x7e] -v_tanh_f16 v5, v127 -// GFX1250: v_tanh_f16_e32 v5, v127 ; encoding: [0x7f,0x3f,0x0a,0x7e] +v_tanh_f16 v5.l, v127.l +// GFX1250: v_tanh_f16_e32 v5.l, v127.l ; encoding: [0x7f,0x3f,0x0a,0x7e] -v_tanh_f16 v5, s1 -// GFX1250: v_tanh_f16_e32 v5, s1 ; encoding: [0x01,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, s1 +// GFX1250: v_tanh_f16_e32 v5.l, s1 ; encoding: [0x01,0x3e,0x0a,0x7e] -v_tanh_f16 v5, s105 -// GFX1250: 
v_tanh_f16_e32 v5, s105 ; encoding: [0x69,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, s105 +// GFX1250: v_tanh_f16_e32 v5.l, s105 ; encoding: [0x69,0x3e,0x0a,0x7e] -v_tanh_f16 v5, vcc_lo -// GFX1250: v_tanh_f16_e32 v5, vcc_lo ; encoding: [0x6a,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, vcc_lo +// GFX1250: v_tanh_f16_e32 v5.l, vcc_lo ; encoding: [0x6a,0x3e,0x0a,0x7e] -v_tanh_f16 v5, vcc_hi -// GFX1250: v_tanh_f16_e32 v5, vcc_hi ; encoding: [0x6b,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, vcc_hi +// GFX1250: v_tanh_f16_e32 v5.l, vcc_hi ; encoding: [0x6b,0x3e,0x0a,0x7e] -v_tanh_f16 v5, ttmp15 -// GFX1250: v_tanh_f16_e32 v5, ttmp15 ; encoding: [0x7b,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, ttmp15 +// GFX1250: v_tanh_f16_e32 v5.l, ttmp15 ; encoding: [0x7b,0x3e,0x0a,0x7e] -v_tanh_f16 v5, m0 -// GFX1250: v_tanh_f16_e32 v5, m0 ; encoding: [0x7d,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, m0 +// GFX1250: v_tanh_f16_e32 v5.l, m0 ; encoding: [0x7d,0x3e,0x0a,0x7e] -v_tanh_f16 v5, exec_lo -// GFX1250: v_tanh_f16_e32 v5, exec_lo ; encoding: [0x7e,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, exec_lo +// GFX1250: v_tanh_f16_e32 v5.l, exec_lo ; encoding: [0x7e,0x3e,0x0a,0x7e] -v_tanh_f16 v5, exec_hi -// GFX1250: v_tanh_f16_e32 v5, exec_hi ; encoding: [0x7f,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, exec_hi +// GFX1250: v_tanh_f16_e32 v5.l, exec_hi ; encoding: [0x7f,0x3e,0x0a,0x7e] -v_tanh_f16 v5, null -// GFX1250: v_tanh_f16_e32 v5, null ; encoding: [0x7c,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, null +// GFX1250: v_tanh_f16_e32 v5.l, null ; encoding: [0x7c,0x3e,0x0a,0x7e] -v_tanh_f16 v5, -1 -// GFX1250: v_tanh_f16_e32 v5, -1 ; encoding: [0xc1,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, -1 +// GFX1250: v_tanh_f16_e32 v5.l, -1 ; encoding: [0xc1,0x3e,0x0a,0x7e] -v_tanh_f16 v5, 0.5 -// GFX1250: v_tanh_f16_e32 v5, 0.5 ; encoding: [0xf0,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, 0.5 +// GFX1250: v_tanh_f16_e32 v5.l, 0.5 ; encoding: [0xf0,0x3e,0x0a,0x7e] -v_tanh_f16 v5, src_scc -// GFX1250: v_tanh_f16_e32 v5, src_scc ; encoding: [0xfd,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, src_scc 
+// GFX1250: v_tanh_f16_e32 v5.l, src_scc ; encoding: [0xfd,0x3e,0x0a,0x7e] -v_tanh_f16 v127, 0x8000 -// GFX1250: v_tanh_f16_e32 v127, 0x8000 ; encoding: [0xff,0x3e,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_tanh_f16 v127.l, 0x8000 +// GFX1250: v_tanh_f16_e32 v127.l, 0x8000 ; encoding: [0xff,0x3e,0xfe,0x7e,0x00,0x80,0x00,0x00] v_tanh_f16 v5.h, v1.h // GFX1250: v_tanh_f16_e32 v5.h, v1.h ; encoding: [0x81,0x3f,0x0a,0x7f] -v_tanh_bf16 v5, v1 -// GFX1250: v_tanh_bf16_e32 v5, v1 ; encoding: [0x01,0x95,0x0a,0x7e] +v_tanh_bf16 v5.l, v1.l +// GFX1250: v_tanh_bf16_e32 v5.l, v1.l ; encoding: [0x01,0x95,0x0a,0x7e] -v_tanh_bf16 v5, v127 -// GFX1250: v_tanh_bf16_e32 v5, v127 ; encoding: [0x7f,0x95,0x0a,0x7e] +v_tanh_bf16 v5.l, v127.l +// GFX1250: v_tanh_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0x95,0x0a,0x7e] -v_tanh_bf16 v5, s1 -// GFX1250: v_tanh_bf16_e32 v5, s1 ; encoding: [0x01,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, s1 +// GFX1250: v_tanh_bf16_e32 v5.l, s1 ; encoding: [0x01,0x94,0x0a,0x7e] -v_tanh_bf16 v5, s105 -// GFX1250: v_tanh_bf16_e32 v5, s105 ; encoding: [0x69,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, s105 +// GFX1250: v_tanh_bf16_e32 v5.l, s105 ; encoding: [0x69,0x94,0x0a,0x7e] -v_tanh_bf16 v5, vcc_lo -// GFX1250: v_tanh_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, vcc_lo +// GFX1250: v_tanh_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0x94,0x0a,0x7e] -v_tanh_bf16 v5, vcc_hi -// GFX1250: v_tanh_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, vcc_hi +// GFX1250: v_tanh_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0x94,0x0a,0x7e] -v_tanh_bf16 v5, ttmp15 -// GFX1250: v_tanh_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, ttmp15 +// GFX1250: v_tanh_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0x94,0x0a,0x7e] -v_tanh_bf16 v5, m0 -// GFX1250: v_tanh_bf16_e32 v5, m0 ; encoding: [0x7d,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, m0 +// GFX1250: v_tanh_bf16_e32 v5.l, m0 ; encoding: [0x7d,0x94,0x0a,0x7e] -v_tanh_bf16 v5, exec_lo -// GFX1250: 
v_tanh_bf16_e32 v5, exec_lo ; encoding: [0x7e,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, exec_lo +// GFX1250: v_tanh_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0x94,0x0a,0x7e] -v_tanh_bf16 v5, exec_hi -// GFX1250: v_tanh_bf16_e32 v5, exec_hi ; encoding: [0x7f,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, exec_hi +// GFX1250: v_tanh_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0x94,0x0a,0x7e] -v_tanh_bf16 v5, null -// GFX1250: v_tanh_bf16_e32 v5, null ; encoding: [0x7c,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, null +// GFX1250: v_tanh_bf16_e32 v5.l, null ; encoding: [0x7c,0x94,0x0a,0x7e] -v_tanh_bf16 v5, -1 -// GFX1250: v_tanh_bf16_e32 v5, -1 ; encoding: [0xc1,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, -1 +// GFX1250: v_tanh_bf16_e32 v5.l, -1 ; encoding: [0xc1,0x94,0x0a,0x7e] -v_tanh_bf16 v5, 0.5 -// GFX1250: v_tanh_bf16_e32 v5, 0.5 ; encoding: [0xf0,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, 0.5 +// GFX1250: v_tanh_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0x94,0x0a,0x7e] -v_tanh_bf16 v5, src_scc -// GFX1250: v_tanh_bf16_e32 v5, src_scc ; encoding: [0xfd,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, src_scc +// GFX1250: v_tanh_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0x94,0x0a,0x7e] -v_tanh_bf16 v127, 0x8000 -// GFX1250: v_tanh_bf16_e32 v127, 0x8000 ; encoding: [0xff,0x94,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_tanh_bf16 v127.l, 0x8000 +// GFX1250: v_tanh_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0x94,0xfe,0x7e,0x00,0x80,0x00,0x00] v_tanh_bf16 v5.h, v1.h // GFX1250: v_tanh_bf16_e32 v5.h, v1.h ; encoding: [0x81,0x95,0x0a,0x7f] @@ -214,347 +214,347 @@ v_prng_b32 v5, src_scc v_prng_b32 v255, 0xaf123456 // GFX1250: v_prng_b32_e32 v255, 0xaf123456 ; encoding: [0xff,0x96,0xfe,0x7f,0x56,0x34,0x12,0xaf] -v_rcp_bf16 v5, v1 -// GFX1250: v_rcp_bf16_e32 v5, v1 ; encoding: [0x01,0xf3,0x0a,0x7e] +v_rcp_bf16 v5.l, v1.l +// GFX1250: v_rcp_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xf3,0x0a,0x7e] -v_rcp_bf16 v5, v127 -// GFX1250: v_rcp_bf16_e32 v5, v127 ; encoding: [0x7f,0xf3,0x0a,0x7e] +v_rcp_bf16 v5.l, v127.l +// GFX1250: v_rcp_bf16_e32 v5.l, v127.l ; 
encoding: [0x7f,0xf3,0x0a,0x7e] -v_rcp_bf16 v5, s1 -// GFX1250: v_rcp_bf16_e32 v5, s1 ; encoding: [0x01,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, s1 +// GFX1250: v_rcp_bf16_e32 v5.l, s1 ; encoding: [0x01,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, s105 -// GFX1250: v_rcp_bf16_e32 v5, s105 ; encoding: [0x69,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, s105 +// GFX1250: v_rcp_bf16_e32 v5.l, s105 ; encoding: [0x69,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, vcc_lo -// GFX1250: v_rcp_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, vcc_lo +// GFX1250: v_rcp_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, vcc_hi -// GFX1250: v_rcp_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, vcc_hi +// GFX1250: v_rcp_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, ttmp15 -// GFX1250: v_rcp_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, ttmp15 +// GFX1250: v_rcp_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, m0 -// GFX1250: v_rcp_bf16_e32 v5, m0 ; encoding: [0x7d,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, m0 +// GFX1250: v_rcp_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, exec_lo -// GFX1250: v_rcp_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, exec_lo +// GFX1250: v_rcp_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, exec_hi -// GFX1250: v_rcp_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, exec_hi +// GFX1250: v_rcp_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, null -// GFX1250: v_rcp_bf16_e32 v5, null ; encoding: [0x7c,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, null +// GFX1250: v_rcp_bf16_e32 v5.l, null ; encoding: [0x7c,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, -1 -// GFX1250: v_rcp_bf16_e32 v5, -1 ; encoding: [0xc1,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, -1 +// GFX1250: v_rcp_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, 0.5 -// GFX1250: v_rcp_bf16_e32 v5, 0.5 ; 
encoding: [0xf0,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, 0.5 +// GFX1250: v_rcp_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, src_scc -// GFX1250: v_rcp_bf16_e32 v5, src_scc ; encoding: [0xfd,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, src_scc +// GFX1250: v_rcp_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xf2,0x0a,0x7e] -v_rcp_bf16 v127, 0x8000 -// GFX1250: v_rcp_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xf2,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_rcp_bf16 v127.l, 0x8000 +// GFX1250: v_rcp_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xf2,0xfe,0x7e,0x00,0x80,0x00,0x00] v_rcp_bf16 v5.h, v1.h // GFX1250: v_rcp_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xf3,0x0a,0x7f] -v_sqrt_bf16 v5, v1 -// GFX1250: v_sqrt_bf16_e32 v5, v1 ; encoding: [0x01,0xf5,0x0a,0x7e] +v_sqrt_bf16 v5.l, v1.l +// GFX1250: v_sqrt_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xf5,0x0a,0x7e] -v_sqrt_bf16 v5, v127 -// GFX1250: v_sqrt_bf16_e32 v5, v127 ; encoding: [0x7f,0xf5,0x0a,0x7e] +v_sqrt_bf16 v5.l, v127.l +// GFX1250: v_sqrt_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xf5,0x0a,0x7e] -v_sqrt_bf16 v5, s1 -// GFX1250: v_sqrt_bf16_e32 v5, s1 ; encoding: [0x01,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, s1 +// GFX1250: v_sqrt_bf16_e32 v5.l, s1 ; encoding: [0x01,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, s105 -// GFX1250: v_sqrt_bf16_e32 v5, s105 ; encoding: [0x69,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, s105 +// GFX1250: v_sqrt_bf16_e32 v5.l, s105 ; encoding: [0x69,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, vcc_lo -// GFX1250: v_sqrt_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, vcc_lo +// GFX1250: v_sqrt_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, vcc_hi -// GFX1250: v_sqrt_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, vcc_hi +// GFX1250: v_sqrt_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, ttmp15 -// GFX1250: v_sqrt_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, ttmp15 +// GFX1250: v_sqrt_bf16_e32 v5.l, ttmp15 ; 
encoding: [0x7b,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, m0 -// GFX1250: v_sqrt_bf16_e32 v5, m0 ; encoding: [0x7d,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, m0 +// GFX1250: v_sqrt_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, exec_lo -// GFX1250: v_sqrt_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, exec_lo +// GFX1250: v_sqrt_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, exec_hi -// GFX1250: v_sqrt_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, exec_hi +// GFX1250: v_sqrt_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, null -// GFX1250: v_sqrt_bf16_e32 v5, null ; encoding: [0x7c,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, null +// GFX1250: v_sqrt_bf16_e32 v5.l, null ; encoding: [0x7c,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, -1 -// GFX1250: v_sqrt_bf16_e32 v5, -1 ; encoding: [0xc1,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, -1 +// GFX1250: v_sqrt_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, 0.5 -// GFX1250: v_sqrt_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, 0.5 +// GFX1250: v_sqrt_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, src_scc -// GFX1250: v_sqrt_bf16_e32 v5, src_scc ; encoding: [0xfd,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, src_scc +// GFX1250: v_sqrt_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xf4,0x0a,0x7e] -v_sqrt_bf16 v127, 0x8000 -// GFX1250: v_sqrt_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xf4,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_sqrt_bf16 v127.l, 0x8000 +// GFX1250: v_sqrt_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xf4,0xfe,0x7e,0x00,0x80,0x00,0x00] v_sqrt_bf16 v5.h, v1.h // GFX1250: v_sqrt_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xf5,0x0a,0x7f] -v_rsq_bf16 v5, v1 -// GFX1250: v_rsq_bf16_e32 v5, v1 ; encoding: [0x01,0xf7,0x0a,0x7e] +v_rsq_bf16 v5.l, v1.l +// GFX1250: v_rsq_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xf7,0x0a,0x7e] -v_rsq_bf16 v5, v127 -// GFX1250: v_rsq_bf16_e32 v5, v127 ; encoding: 
[0x7f,0xf7,0x0a,0x7e] +v_rsq_bf16 v5.l, v127.l +// GFX1250: v_rsq_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xf7,0x0a,0x7e] -v_rsq_bf16 v5, s1 -// GFX1250: v_rsq_bf16_e32 v5, s1 ; encoding: [0x01,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, s1 +// GFX1250: v_rsq_bf16_e32 v5.l, s1 ; encoding: [0x01,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, s105 -// GFX1250: v_rsq_bf16_e32 v5, s105 ; encoding: [0x69,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, s105 +// GFX1250: v_rsq_bf16_e32 v5.l, s105 ; encoding: [0x69,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, vcc_lo -// GFX1250: v_rsq_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, vcc_lo +// GFX1250: v_rsq_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, vcc_hi -// GFX1250: v_rsq_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, vcc_hi +// GFX1250: v_rsq_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, ttmp15 -// GFX1250: v_rsq_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, ttmp15 +// GFX1250: v_rsq_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, m0 -// GFX1250: v_rsq_bf16_e32 v5, m0 ; encoding: [0x7d,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, m0 +// GFX1250: v_rsq_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, exec_lo -// GFX1250: v_rsq_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, exec_lo +// GFX1250: v_rsq_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, exec_hi -// GFX1250: v_rsq_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, exec_hi +// GFX1250: v_rsq_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, null -// GFX1250: v_rsq_bf16_e32 v5, null ; encoding: [0x7c,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, null +// GFX1250: v_rsq_bf16_e32 v5.l, null ; encoding: [0x7c,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, -1 -// GFX1250: v_rsq_bf16_e32 v5, -1 ; encoding: [0xc1,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, -1 +// GFX1250: v_rsq_bf16_e32 v5.l, -1 ; 
encoding: [0xc1,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, 0.5 -// GFX1250: v_rsq_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, 0.5 +// GFX1250: v_rsq_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, src_scc -// GFX1250: v_rsq_bf16_e32 v5, src_scc ; encoding: [0xfd,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, src_scc +// GFX1250: v_rsq_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xf6,0x0a,0x7e] -v_rsq_bf16 v127, 0x8000 -// GFX1250: v_rsq_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xf6,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_rsq_bf16 v127.l, 0x8000 +// GFX1250: v_rsq_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xf6,0xfe,0x7e,0x00,0x80,0x00,0x00] v_rsq_bf16 v5.h, v1.h // GFX1250: v_rsq_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xf7,0x0a,0x7f] -v_log_bf16 v5, v1 -// GFX1250: v_log_bf16_e32 v5, v1 ; encoding: [0x01,0xf9,0x0a,0x7e] +v_log_bf16 v5.l, v1.l +// GFX1250: v_log_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xf9,0x0a,0x7e] -v_log_bf16 v5, v127 -// GFX1250: v_log_bf16_e32 v5, v127 ; encoding: [0x7f,0xf9,0x0a,0x7e] +v_log_bf16 v5.l, v127.l +// GFX1250: v_log_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xf9,0x0a,0x7e] -v_log_bf16 v5, s1 -// GFX1250: v_log_bf16_e32 v5, s1 ; encoding: [0x01,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, s1 +// GFX1250: v_log_bf16_e32 v5.l, s1 ; encoding: [0x01,0xf8,0x0a,0x7e] -v_log_bf16 v5, s105 -// GFX1250: v_log_bf16_e32 v5, s105 ; encoding: [0x69,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, s105 +// GFX1250: v_log_bf16_e32 v5.l, s105 ; encoding: [0x69,0xf8,0x0a,0x7e] -v_log_bf16 v5, vcc_lo -// GFX1250: v_log_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, vcc_lo +// GFX1250: v_log_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xf8,0x0a,0x7e] -v_log_bf16 v5, vcc_hi -// GFX1250: v_log_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, vcc_hi +// GFX1250: v_log_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xf8,0x0a,0x7e] -v_log_bf16 v5, ttmp15 -// GFX1250: v_log_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xf8,0x0a,0x7e] +v_log_bf16 
v5.l, ttmp15 +// GFX1250: v_log_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xf8,0x0a,0x7e] -v_log_bf16 v5, m0 -// GFX1250: v_log_bf16_e32 v5, m0 ; encoding: [0x7d,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, m0 +// GFX1250: v_log_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xf8,0x0a,0x7e] -v_log_bf16 v5, exec_lo -// GFX1250: v_log_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, exec_lo +// GFX1250: v_log_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xf8,0x0a,0x7e] -v_log_bf16 v5, exec_hi -// GFX1250: v_log_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, exec_hi +// GFX1250: v_log_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xf8,0x0a,0x7e] -v_log_bf16 v5, null -// GFX1250: v_log_bf16_e32 v5, null ; encoding: [0x7c,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, null +// GFX1250: v_log_bf16_e32 v5.l, null ; encoding: [0x7c,0xf8,0x0a,0x7e] -v_log_bf16 v5, -1 -// GFX1250: v_log_bf16_e32 v5, -1 ; encoding: [0xc1,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, -1 +// GFX1250: v_log_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xf8,0x0a,0x7e] -v_log_bf16 v5, 0.5 -// GFX1250: v_log_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, 0.5 +// GFX1250: v_log_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xf8,0x0a,0x7e] -v_log_bf16 v5, src_scc -// GFX1250: v_log_bf16_e32 v5, src_scc ; encoding: [0xfd,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, src_scc +// GFX1250: v_log_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xf8,0x0a,0x7e] -v_log_bf16 v127, 0x8000 -// GFX1250: v_log_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xf8,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_log_bf16 v127.l, 0x8000 +// GFX1250: v_log_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xf8,0xfe,0x7e,0x00,0x80,0x00,0x00] v_log_bf16 v5.h, v1.h // GFX1250: v_log_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xf9,0x0a,0x7f] -v_exp_bf16 v5, v1 -// GFX1250: v_exp_bf16_e32 v5, v1 ; encoding: [0x01,0xfb,0x0a,0x7e] +v_exp_bf16 v5.l, v1.l +// GFX1250: v_exp_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xfb,0x0a,0x7e] -v_exp_bf16 v5, v127 -// GFX1250: v_exp_bf16_e32 v5, v127 
; encoding: [0x7f,0xfb,0x0a,0x7e] +v_exp_bf16 v5.l, v127.l +// GFX1250: v_exp_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xfb,0x0a,0x7e] -v_exp_bf16 v5, s1 -// GFX1250: v_exp_bf16_e32 v5, s1 ; encoding: [0x01,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, s1 +// GFX1250: v_exp_bf16_e32 v5.l, s1 ; encoding: [0x01,0xfa,0x0a,0x7e] -v_exp_bf16 v5, s105 -// GFX1250: v_exp_bf16_e32 v5, s105 ; encoding: [0x69,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, s105 +// GFX1250: v_exp_bf16_e32 v5.l, s105 ; encoding: [0x69,0xfa,0x0a,0x7e] -v_exp_bf16 v5, vcc_lo -// GFX1250: v_exp_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, vcc_lo +// GFX1250: v_exp_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xfa,0x0a,0x7e] -v_exp_bf16 v5, vcc_hi -// GFX1250: v_exp_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, vcc_hi +// GFX1250: v_exp_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xfa,0x0a,0x7e] -v_exp_bf16 v5, ttmp15 -// GFX1250: v_exp_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, ttmp15 +// GFX1250: v_exp_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xfa,0x0a,0x7e] -v_exp_bf16 v5, m0 -// GFX1250: v_exp_bf16_e32 v5, m0 ; encoding: [0x7d,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, m0 +// GFX1250: v_exp_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xfa,0x0a,0x7e] -v_exp_bf16 v5, exec_lo -// GFX1250: v_exp_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, exec_lo +// GFX1250: v_exp_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xfa,0x0a,0x7e] -v_exp_bf16 v5, exec_hi -// GFX1250: v_exp_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, exec_hi +// GFX1250: v_exp_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xfa,0x0a,0x7e] -v_exp_bf16 v5, null -// GFX1250: v_exp_bf16_e32 v5, null ; encoding: [0x7c,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, null +// GFX1250: v_exp_bf16_e32 v5.l, null ; encoding: [0x7c,0xfa,0x0a,0x7e] -v_exp_bf16 v5, -1 -// GFX1250: v_exp_bf16_e32 v5, -1 ; encoding: [0xc1,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, -1 +// GFX1250: v_exp_bf16_e32 
v5.l, -1 ; encoding: [0xc1,0xfa,0x0a,0x7e] -v_exp_bf16 v5, 0.5 -// GFX1250: v_exp_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, 0.5 +// GFX1250: v_exp_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xfa,0x0a,0x7e] -v_exp_bf16 v5, src_scc -// GFX1250: v_exp_bf16_e32 v5, src_scc ; encoding: [0xfd,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, src_scc +// GFX1250: v_exp_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xfa,0x0a,0x7e] -v_exp_bf16 v127, 0x8000 -// GFX1250: v_exp_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xfa,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_exp_bf16 v127.l, 0x8000 +// GFX1250: v_exp_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xfa,0xfe,0x7e,0x00,0x80,0x00,0x00] v_exp_bf16 v5.h, v1.h // GFX1250: v_exp_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xfb,0x0a,0x7f] -v_sin_bf16 v5, v1 -// GFX1250: v_sin_bf16_e32 v5, v1 ; encoding: [0x01,0xfd,0x0a,0x7e] +v_sin_bf16 v5.l, v1.l +// GFX1250: v_sin_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xfd,0x0a,0x7e] -v_sin_bf16 v5, v127 -// GFX1250: v_sin_bf16_e32 v5, v127 ; encoding: [0x7f,0xfd,0x0a,0x7e] +v_sin_bf16 v5.l, v127.l +// GFX1250: v_sin_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xfd,0x0a,0x7e] -v_sin_bf16 v5, s1 -// GFX1250: v_sin_bf16_e32 v5, s1 ; encoding: [0x01,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, s1 +// GFX1250: v_sin_bf16_e32 v5.l, s1 ; encoding: [0x01,0xfc,0x0a,0x7e] -v_sin_bf16 v5, s105 -// GFX1250: v_sin_bf16_e32 v5, s105 ; encoding: [0x69,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, s105 +// GFX1250: v_sin_bf16_e32 v5.l, s105 ; encoding: [0x69,0xfc,0x0a,0x7e] -v_sin_bf16 v5, vcc_lo -// GFX1250: v_sin_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, vcc_lo +// GFX1250: v_sin_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xfc,0x0a,0x7e] -v_sin_bf16 v5, vcc_hi -// GFX1250: v_sin_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, vcc_hi +// GFX1250: v_sin_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xfc,0x0a,0x7e] -v_sin_bf16 v5, ttmp15 -// GFX1250: v_sin_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xfc,0x0a,0x7e] 
+v_sin_bf16 v5.l, ttmp15 +// GFX1250: v_sin_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xfc,0x0a,0x7e] -v_sin_bf16 v5, m0 -// GFX1250: v_sin_bf16_e32 v5, m0 ; encoding: [0x7d,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, m0 +// GFX1250: v_sin_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xfc,0x0a,0x7e] -v_sin_bf16 v5, exec_lo -// GFX1250: v_sin_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, exec_lo +// GFX1250: v_sin_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xfc,0x0a,0x7e] -v_sin_bf16 v5, exec_hi -// GFX1250: v_sin_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, exec_hi +// GFX1250: v_sin_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xfc,0x0a,0x7e] -v_sin_bf16 v5, null -// GFX1250: v_sin_bf16_e32 v5, null ; encoding: [0x7c,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, null +// GFX1250: v_sin_bf16_e32 v5.l, null ; encoding: [0x7c,0xfc,0x0a,0x7e] -v_sin_bf16 v5, -1 -// GFX1250: v_sin_bf16_e32 v5, -1 ; encoding: [0xc1,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, -1 +// GFX1250: v_sin_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xfc,0x0a,0x7e] -v_sin_bf16 v5, 0.5 -// GFX1250: v_sin_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, 0.5 +// GFX1250: v_sin_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xfc,0x0a,0x7e] -v_sin_bf16 v5, src_scc -// GFX1250: v_sin_bf16_e32 v5, src_scc ; encoding: [0xfd,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, src_scc +// GFX1250: v_sin_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xfc,0x0a,0x7e] -v_sin_bf16 v127, 0x8000 -// GFX1250: v_sin_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xfc,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_sin_bf16 v127.l, 0x8000 +// GFX1250: v_sin_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xfc,0xfe,0x7e,0x00,0x80,0x00,0x00] v_sin_bf16 v5.h, v1.h // GFX1250: v_sin_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xfd,0x0a,0x7f] -v_cos_bf16 v5, v1 -// GFX1250: v_cos_bf16_e32 v5, v1 ; encoding: [0x01,0xff,0x0a,0x7e] +v_cos_bf16 v5.l, v1.l +// GFX1250: v_cos_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xff,0x0a,0x7e] -v_cos_bf16 v5, v127 -// GFX1250: 
v_cos_bf16_e32 v5, v127 ; encoding: [0x7f,0xff,0x0a,0x7e] +v_cos_bf16 v5.l, v127.l +// GFX1250: v_cos_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xff,0x0a,0x7e] -v_cos_bf16 v5, s1 -// GFX1250: v_cos_bf16_e32 v5, s1 ; encoding: [0x01,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, s1 +// GFX1250: v_cos_bf16_e32 v5.l, s1 ; encoding: [0x01,0xfe,0x0a,0x7e] -v_cos_bf16 v5, s105 -// GFX1250: v_cos_bf16_e32 v5, s105 ; encoding: [0x69,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, s105 +// GFX1250: v_cos_bf16_e32 v5.l, s105 ; encoding: [0x69,0xfe,0x0a,0x7e] -v_cos_bf16 v5, vcc_lo -// GFX1250: v_cos_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, vcc_lo +// GFX1250: v_cos_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xfe,0x0a,0x7e] -v_cos_bf16 v5, vcc_hi -// GFX1250: v_cos_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, vcc_hi +// GFX1250: v_cos_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xfe,0x0a,0x7e] -v_cos_bf16 v5, ttmp15 -// GFX1250: v_cos_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, ttmp15 +// GFX1250: v_cos_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xfe,0x0a,0x7e] -v_cos_bf16 v5, m0 -// GFX1250: v_cos_bf16_e32 v5, m0 ; encoding: [0x7d,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, m0 +// GFX1250: v_cos_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xfe,0x0a,0x7e] -v_cos_bf16 v5, exec_lo -// GFX1250: v_cos_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, exec_lo +// GFX1250: v_cos_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xfe,0x0a,0x7e] -v_cos_bf16 v5, exec_hi -// GFX1250: v_cos_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, exec_hi +// GFX1250: v_cos_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xfe,0x0a,0x7e] -v_cos_bf16 v5, null -// GFX1250: v_cos_bf16_e32 v5, null ; encoding: [0x7c,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, null +// GFX1250: v_cos_bf16_e32 v5.l, null ; encoding: [0x7c,0xfe,0x0a,0x7e] -v_cos_bf16 v5, -1 -// GFX1250: v_cos_bf16_e32 v5, -1 ; encoding: [0xc1,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, -1 +// 
GFX1250: v_cos_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xfe,0x0a,0x7e] -v_cos_bf16 v5, 0.5 -// GFX1250: v_cos_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, 0.5 +// GFX1250: v_cos_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xfe,0x0a,0x7e] -v_cos_bf16 v5, src_scc -// GFX1250: v_cos_bf16_e32 v5, src_scc ; encoding: [0xfd,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, src_scc +// GFX1250: v_cos_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xfe,0x0a,0x7e] -v_cos_bf16 v127, 0x8000 -// GFX1250: v_cos_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xfe,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_cos_bf16 v127.l, 0x8000 +// GFX1250: v_cos_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xfe,0xfe,0x7e,0x00,0x80,0x00,0x00] v_cos_bf16 v5.h, v1.h // GFX1250: v_cos_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xff,0x0a,0x7f] -v_cvt_f32_bf16 v5, v1 -// GFX1250: v_cvt_f32_bf16_e32 v5, v1 ; encoding: [0x01,0xe5,0x0a,0x7e] +v_cvt_f32_bf16 v5, v1.l +// GFX1250: v_cvt_f32_bf16_e32 v5, v1.l ; encoding: [0x01,0xe5,0x0a,0x7e] -v_cvt_f32_bf16 v5, v127 -// GFX1250: v_cvt_f32_bf16_e32 v5, v127 ; encoding: [0x7f,0xe5,0x0a,0x7e] +v_cvt_f32_bf16 v5, v127.l +// GFX1250: v_cvt_f32_bf16_e32 v5, v127.l ; encoding: [0x7f,0xe5,0x0a,0x7e] v_cvt_f32_bf16 v5, s1 // GFX1250: v_cvt_f32_bf16_e32 v5, s1 ; encoding: [0x01,0xe4,0x0a,0x7e] @@ -676,11 +676,11 @@ v_cvt_pk_f32_bf8_e32 v[2:3], 3 v_cvt_pk_f32_bf8_e32 v[4:5], 3 // GFX1250: v_cvt_pk_f32_bf8_e32 v[4:5], 3 ; encoding: [0x83,0xde,0x08,0x7e] -v_cvt_pk_f32_bf8_e32 v[2:3], v3 -// GFX1250: v_cvt_pk_f32_bf8_e32 v[2:3], v3 ; encoding: [0x03,0xdf,0x04,0x7e] +v_cvt_pk_f32_bf8_e32 v[2:3], v3.l +// GFX1250: v_cvt_pk_f32_bf8_e32 v[2:3], v3.l ; encoding: [0x03,0xdf,0x04,0x7e] -v_cvt_pk_f32_bf8_e32 v[4:5], v3 -// GFX1250: v_cvt_pk_f32_bf8_e32 v[4:5], v3 ; encoding: [0x03,0xdf,0x08,0x7e] +v_cvt_pk_f32_bf8_e32 v[4:5], v3.l +// GFX1250: v_cvt_pk_f32_bf8_e32 v[4:5], v3.l ; encoding: [0x03,0xdf,0x08,0x7e] v_cvt_pk_f32_bf8_e32 v[4:5], v127.h // GFX1250: v_cvt_pk_f32_bf8_e32 v[4:5], v127.h ; encoding: 
[0xff,0xdf,0x08,0x7e] @@ -703,32 +703,32 @@ v_cvt_pk_f32_fp8_e32 v[4:5], v127.h v_cvt_pk_f32_fp8_e32 v[4:5], v127.l // GFX1250: v_cvt_pk_f32_fp8_e32 v[4:5], v127.l ; encoding: [0x7f,0xdd,0x08,0x7e] -v_sat_pk4_i4_i8 v1, v2 -// GFX1250: v_sat_pk4_i4_i8_e32 v1, v2 ; encoding: [0x02,0xe7,0x02,0x7e] +v_sat_pk4_i4_i8 v1.l, v2 +// GFX1250: v_sat_pk4_i4_i8_e32 v1.l, v2 ; encoding: [0x02,0xe7,0x02,0x7e] -v_sat_pk4_i4_i8 v1, s2 -// GFX1250: v_sat_pk4_i4_i8_e32 v1, s2 ; encoding: [0x02,0xe6,0x02,0x7e] +v_sat_pk4_i4_i8 v1.l, s2 +// GFX1250: v_sat_pk4_i4_i8_e32 v1.l, s2 ; encoding: [0x02,0xe6,0x02,0x7e] -v_sat_pk4_i4_i8 v1, 2 -// GFX1250: v_sat_pk4_i4_i8_e32 v1, 2 ; encoding: [0x82,0xe6,0x02,0x7e] +v_sat_pk4_i4_i8 v1.l, 2 +// GFX1250: v_sat_pk4_i4_i8_e32 v1.l, 2 ; encoding: [0x82,0xe6,0x02,0x7e] -v_sat_pk4_i4_i8 v1, 0x1234 -// GFX1250: v_sat_pk4_i4_i8_e32 v1, 0x1234 ; encoding: [0xff,0xe6,0x02,0x7e,0x34,0x12,0x00,0x00] +v_sat_pk4_i4_i8 v1.l, 0x1234 +// GFX1250: v_sat_pk4_i4_i8_e32 v1.l, 0x1234 ; encoding: [0xff,0xe6,0x02,0x7e,0x34,0x12,0x00,0x00] v_sat_pk4_i4_i8 v1.h, v2 // GFX1250: v_sat_pk4_i4_i8_e32 v1.h, v2 ; encoding: [0x02,0xe7,0x02,0x7f] -v_sat_pk4_u4_u8 v1, v2 -// GFX1250: v_sat_pk4_u4_u8_e32 v1, v2 ; encoding: [0x02,0xe9,0x02,0x7e] +v_sat_pk4_u4_u8 v1.l, v2 +// GFX1250: v_sat_pk4_u4_u8_e32 v1.l, v2 ; encoding: [0x02,0xe9,0x02,0x7e] -v_sat_pk4_u4_u8 v1, s2 -// GFX1250: v_sat_pk4_u4_u8_e32 v1, s2 ; encoding: [0x02,0xe8,0x02,0x7e] +v_sat_pk4_u4_u8 v1.l, s2 +// GFX1250: v_sat_pk4_u4_u8_e32 v1.l, s2 ; encoding: [0x02,0xe8,0x02,0x7e] -v_sat_pk4_u4_u8 v1, 2 -// GFX1250: v_sat_pk4_u4_u8_e32 v1, 2 ; encoding: [0x82,0xe8,0x02,0x7e] +v_sat_pk4_u4_u8 v1.l, 2 +// GFX1250: v_sat_pk4_u4_u8_e32 v1.l, 2 ; encoding: [0x82,0xe8,0x02,0x7e] -v_sat_pk4_u4_u8 v1, 0x1234 -// GFX1250: v_sat_pk4_u4_u8_e32 v1, 0x1234 ; encoding: [0xff,0xe8,0x02,0x7e,0x34,0x12,0x00,0x00] +v_sat_pk4_u4_u8 v1.l, 0x1234 +// GFX1250: v_sat_pk4_u4_u8_e32 v1.l, 0x1234 ; encoding: 
[0xff,0xe8,0x02,0x7e,0x34,0x12,0x00,0x00] v_sat_pk4_u4_u8 v1.h, v2 // GFX1250: v_sat_pk4_u4_u8_e32 v1.h, v2 ; encoding: [0x02,0xe9,0x02,0x7f] diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16.s index 0a46f2f..592619f 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16.s @@ -58,120 +58,120 @@ v_tanh_f32 v255, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi // GFX1250: v_tanh_f32_dpp v255, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x3c,0xfe,0x7f,0xff,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_tanh_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_tanh_f16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_tanh_f16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_tanh_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_tanh_f16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_tanh_f16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_mirror -// GFX1250: v_tanh_f16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_mirror +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_half_mirror -// 
GFX1250: v_tanh_f16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_half_mirror +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_shl:1 -// GFX1250: v_tanh_f16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_shl:1 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_shl:15 -// GFX1250: v_tanh_f16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_shl:15 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_shr:1 -// GFX1250: v_tanh_f16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_shr:1 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_shr:15 -// GFX1250: v_tanh_f16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_shr:15 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_ror:1 -// GFX1250: 
v_tanh_f16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_ror:1 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_ror:15 -// GFX1250: v_tanh_f16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_ror:15 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_tanh_f16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_tanh_f16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_tanh_f16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_tanh_f16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_tanh_f16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l 
row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_tanh_f16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x3e,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_tanh_f16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_tanh_f16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x3e,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_f16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_tanh_f16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_tanh_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_tanh_bf16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_tanh_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_tanh_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_mirror -// GFX1250: v_tanh_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: 
[0xfa,0x94,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_mirror +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_half_mirror -// GFX1250: v_tanh_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_half_mirror +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_shl:1 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_shl:1 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_shl:15 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_shr:1 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_shr:15 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_shr:15 row_mask:0xf 
bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_shr:15 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_ror:1 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_ror:15 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_tanh_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_tanh_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: 
instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_tanh_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_tanh_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x94,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_tanh_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_tanh_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x94,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_bf16 v5.h, v1.h quad_perm:[3,2,1,0] @@ -230,480 +230,480 @@ v_prng_b32 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 // GFX1250: v_prng_b32_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x96,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_rcp_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_rcp_bf16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_rcp_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf 
bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_rcp_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_mirror -// GFX1250: v_rcp_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_mirror +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_half_mirror -// GFX1250: v_rcp_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_half_mirror +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_shl:1 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_shl:1 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_shl:15 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_shr:1 -// GFX1250: v_rcp_bf16_dpp v5, v1 
row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_shr:15 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_shr:15 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_ror:1 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_ror:15 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_rcp_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 
row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_rcp_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_rcp_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_rcp_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf2,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_rcp_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_rcp_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf2,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rcp_bf16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_rcp_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_sqrt_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_sqrt_bf16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_sqrt_bf16_dpp 
v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_sqrt_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_sqrt_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 row_mirror -// GFX1250: v_sqrt_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_sqrt_bf16 v5.l, v1.l row_mirror +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 row_half_mirror -// GFX1250: v_sqrt_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_sqrt_bf16 v5.l, v1.l row_half_mirror +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 row_shl:1 -// GFX1250: v_sqrt_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_sqrt_bf16 v5.l, v1.l row_shl:1 +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 row_shl:15 -// GFX1250: v_sqrt_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: 
[0xfa,0xf4,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_sqrt_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 row_shr:1 -// GFX1250: v_sqrt_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_sqrt_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 row_shr:15 -// GFX1250: v_sqrt_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_sqrt_bf16 v5.l, v1.l row_shr:15 +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 row_ror:1 -// GFX1250: v_sqrt_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_sqrt_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 row_ror:15 -// GFX1250: v_sqrt_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_sqrt_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_sqrt_bf16_dpp v5, v1 row_share:0 row_mask:0xf 
bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_sqrt_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_sqrt_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_sqrt_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_sqrt_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_sqrt_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_sqrt_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf4,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_sqrt_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_sqrt_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf4,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sqrt_bf16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_sqrt_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; 
encoding: [0xfa,0xf4,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_rsq_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_rsq_bf16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_rsq_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_rsq_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_mirror -// GFX1250: v_rsq_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_mirror +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_half_mirror -// GFX1250: v_rsq_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_half_mirror +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_shl:1 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_shl:1 +// 
GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_shl:15 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_shr:1 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_shr:15 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_shr:15 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_ror:1 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_ror:15 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_rsq_bf16_dpp 
v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_rsq_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_rsq_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_rsq_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_rsq_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf6,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_rsq_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_rsq_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 
bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf6,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rsq_bf16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_rsq_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_log_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_log_bf16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_log_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_log_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_log_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_log_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_mirror -// GFX1250: v_log_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_log_bf16 v5.l, v1.l row_mirror +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_half_mirror -// GFX1250: v_log_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_log_bf16 v5.l, v1.l row_half_mirror +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: 
[0xfa,0xf8,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_shl:1 -// GFX1250: v_log_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_log_bf16 v5.l, v1.l row_shl:1 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_shl:15 -// GFX1250: v_log_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_log_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_shr:1 -// GFX1250: v_log_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_log_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_shr:15 -// GFX1250: v_log_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_log_bf16 v5.l, v1.l row_shr:15 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_ror:1 -// GFX1250: v_log_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_log_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: 
[0xfa,0xf8,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_ror:15 -// GFX1250: v_log_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_log_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_log_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_log_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_log_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_log_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_log_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_log_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 
bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_log_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf8,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_log_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_log_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf8,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_log_bf16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_log_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_exp_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_exp_bf16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_exp_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_exp_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_exp_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_exp_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_mirror -// GFX1250: v_exp_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_mirror +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU 
-v_exp_bf16 v5, v1 row_half_mirror -// GFX1250: v_exp_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_half_mirror +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_shl:1 -// GFX1250: v_exp_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_shl:1 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_shl:15 -// GFX1250: v_exp_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_shr:1 -// GFX1250: v_exp_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_shr:15 -// GFX1250: v_exp_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_shr:15 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU 
-v_exp_bf16 v5, v1 row_ror:1 -// GFX1250: v_exp_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_ror:15 -// GFX1250: v_exp_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_exp_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_exp_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_exp_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_exp_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_exp_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 
fi:0 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_exp_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfa,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_exp_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_exp_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfa,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_exp_bf16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_exp_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_sin_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_sin_bf16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_sin_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_sin_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_sin_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_sin_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_mirror -// GFX1250: v_sin_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: 
[0xfa,0xfc,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_mirror +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_half_mirror -// GFX1250: v_sin_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_half_mirror +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_shl:1 -// GFX1250: v_sin_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_shl:1 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_shl:15 -// GFX1250: v_sin_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_shr:1 -// GFX1250: v_sin_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_shr:15 -// GFX1250: v_sin_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: 
[0xfa,0xfc,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_shr:15 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_ror:1 -// GFX1250: v_sin_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_ror:15 -// GFX1250: v_sin_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_sin_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_sin_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_sin_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 
v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_sin_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_sin_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_sin_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfc,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_sin_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_sin_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfc,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sin_bf16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_sin_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_cos_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_cos_bf16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_cos_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_cos_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_cos_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_cos_bf16_dpp v5.l, v1.l 
quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_mirror -// GFX1250: v_cos_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_mirror +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_half_mirror -// GFX1250: v_cos_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_half_mirror +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_shl:1 -// GFX1250: v_cos_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_shl:1 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_shl:15 -// GFX1250: v_cos_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_shr:1 -// GFX1250: v_cos_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_cos_bf16_dpp 
v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_shr:15 -// GFX1250: v_cos_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_shr:15 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_ror:1 -// GFX1250: v_cos_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_ror:15 -// GFX1250: v_cos_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_cos_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_cos_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: 
[0xfa,0xfe,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_cos_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_cos_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_cos_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_cos_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfe,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_cos_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_cos_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfe,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cos_bf16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_cos_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_cvt_f32_bf16 v5, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff] // 
GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_cvt_f32_bf16 v5, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_mirror -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_mirror +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_half_mirror -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_half_mirror +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_shl:1 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_shl:1 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_shl:15 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_shl:15 +// GFX1250: v_cvt_f32_bf16_dpp v5, 
v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_shr:1 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_shr:1 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_shr:15 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_shr:15 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_ror:1 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_ror:1 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_ror:15 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_ror:15 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff] 
+v_cvt_f32_bf16 v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_cvt_f32_bf16 v5, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_cvt_f32_bf16 v5, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_cvt_f32_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_cvt_f32_bf16 v127, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_cvt_f32_bf16_dpp v127, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_f32_bf16 v5, v1.h quad_perm:[3,2,1,0] @@ -750,24 +750,24 @@ v_cvt_pk_f16_fp8 v1, v2.h quad_perm:[0,1,2,3] // GFX1250: v_cvt_pk_f16_fp8_dpp v1, v2.h quad_perm:[0,1,2,3] 
row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xea,0x02,0x7e,0x82,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf -// GFX1250: v_sat_pk4_i4_i8_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe6,0x02,0x7e,0x02,0x39,0x00,0xff] +v_sat_pk4_i4_i8 v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf +// GFX1250: v_sat_pk4_i4_i8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe6,0x02,0x7e,0x02,0x39,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 -// GFX1250: v_sat_pk4_i4_i8_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0xfa,0xe6,0x02,0x7e,0x02,0x39,0x04,0xff] +v_sat_pk4_i4_i8 v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 +// GFX1250: v_sat_pk4_i4_i8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0xfa,0xe6,0x02,0x7e,0x02,0x39,0x04,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_i4_i8 v1.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf // GFX1250: v_sat_pk4_i4_i8_dpp v1.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe6,0x02,0x7f,0x02,0x39,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf -// GFX1250: v_sat_pk4_u4_u8_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe8,0x02,0x7e,0x02,0x39,0x00,0xff] +v_sat_pk4_u4_u8 v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf +// GFX1250: v_sat_pk4_u4_u8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe8,0x02,0x7e,0x02,0x39,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v1, v2 
quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 -// GFX1250: v_sat_pk4_u4_u8_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0xfa,0xe8,0x02,0x7e,0x02,0x39,0x04,0xff] +v_sat_pk4_u4_u8 v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 +// GFX1250: v_sat_pk4_u4_u8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0xfa,0xe8,0x02,0x7e,0x02,0x39,0x04,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_u4_u8 v1.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8.s index 359aadc..2aabe39 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8.s @@ -14,32 +14,32 @@ v_tanh_f32 v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0 // GFX1250: v_tanh_f32_dpp v255, v255 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x3c,0xfe,0x7f,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_tanh_f16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x3e,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_tanh_f16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_tanh_f16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x3e,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_tanh_f16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0x3e,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_tanh_f16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0x3e,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_tanh_f16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: 
[0xe9,0x3e,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_tanh_f16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_tanh_f16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x3e,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_f16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_tanh_f16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x3e,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_tanh_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x94,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_tanh_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x94,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_tanh_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0x94,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_tanh_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0x94,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_tanh_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x94,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_tanh_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_tanh_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x94,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] @@ -58,152 +58,152 @@ v_prng_b32 v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0 // GFX1250: v_prng_b32_dpp v255, v255 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x96,0xfe,0x7f,0xff,0x00,0x00,0x00] // 
GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_rcp_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf2,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_rcp_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf2,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_rcp_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf2,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_rcp_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf2,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_rcp_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf2,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_rcp_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_rcp_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf2,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rcp_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_rcp_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf2,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sqrt_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf4,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_sqrt_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf4,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sqrt_bf16_dpp 
v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf4,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_sqrt_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf4,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_sqrt_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf4,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_sqrt_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_sqrt_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf4,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sqrt_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_sqrt_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf4,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_rsq_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf6,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_rsq_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf6,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_rsq_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf6,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_rsq_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf6,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_rsq_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf6,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_rsq_bf16 v127.l, v127.l 
dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_rsq_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf6,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rsq_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_rsq_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf6,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_log_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf8,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_log_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_log_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf8,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_log_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf8,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_log_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_log_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf8,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_log_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf8,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_log_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_log_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf8,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_log_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_log_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf8,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_exp_bf16_dpp v5, v1 
dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfa,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_exp_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_exp_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfa,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_exp_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfa,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_exp_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfa,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_exp_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfa,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_exp_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_exp_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfa,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_exp_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_exp_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfa,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sin_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfc,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_sin_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sin_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfc,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sin_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfc,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_sin_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: 
v_sin_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfc,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_sin_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfc,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_sin_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_sin_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfc,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sin_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_sin_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfc,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_cos_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfe,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_cos_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_cos_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfe,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_cos_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfe,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_cos_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfe,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_cos_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfe,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_cos_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_cos_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfe,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: 
:[[@LINE-2]]:1: error: instruction not supported on this GPU v_cos_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_cos_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfe,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_cvt_f32_bf16 v5, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_cvt_f32_bf16 v5, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_cvt_f32_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_cvt_f32_bf16 v127, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_cvt_f32_bf16_dpp v127, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_f32_bf16 v5, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_cvt_f32_bf16_dpp v5, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f16_bf8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_cvt_f16_bf8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05] +v_cvt_f16_bf8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] 
+// GFX1250: v_cvt_f16_bf8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f16_bf8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_cvt_f16_bf8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05] +v_cvt_f16_bf8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_cvt_f16_bf8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_f16_bf8 v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_cvt_f16_bf8_dpp v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf0,0x02,0x7f,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f16_fp8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_cvt_f16_fp8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xee,0x02,0x7e,0x02,0x77,0x39,0x05] +v_cvt_f16_fp8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_cvt_f16_fp8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xee,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f16_fp8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_cvt_f16_fp8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xee,0x02,0x7e,0x02,0x77,0x39,0x05] +v_cvt_f16_fp8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_cvt_f16_fp8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xee,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_f16_fp8 v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] @@ -226,24 +226,24 @@ v_cvt_pk_f16_fp8 v1, v2.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_cvt_pk_f16_fp8_dpp v1, v2.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xea,0x02,0x7e,0x82,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v1, 
v2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sat_pk4_i4_i8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe6,0x02,0x7e,0x02,0x77,0x39,0x05] +v_sat_pk4_i4_i8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sat_pk4_i4_i8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe6,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sat_pk4_i4_i8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe6,0x02,0x7e,0x02,0x77,0x39,0x05] +v_sat_pk4_i4_i8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_sat_pk4_i4_i8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe6,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_i4_i8 v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_sat_pk4_i4_i8_dpp v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe6,0x02,0x7f,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sat_pk4_u4_u8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe8,0x02,0x7e,0x02,0x77,0x39,0x05] +v_sat_pk4_u4_u8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sat_pk4_u4_u8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe8,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sat_pk4_u4_u8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe8,0x02,0x7e,0x02,0x77,0x39,0x05] +v_sat_pk4_u4_u8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_sat_pk4_u4_u8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe8,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_u4_u8 v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s 
b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s index b4d4e36..98d07ac 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s @@ -52,32 +52,32 @@ v_bitop3_b32 v255, 0xaf123456, vcc_hi, null bitop3:103 v_bitop3_b16 v5.l, v1.l, v2.l, s3 // GFX1250: v_bitop3_b16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x33,0xd6,0x01,0x05,0x0e,0x00] -v_bitop3_b16 v5, v1, v2, s3 bitop3:161 -// GFX1250: v_bitop3_b16 v5, v1, v2, s3 bitop3:0xa1 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0x05,0x0e,0x30] +v_bitop3_b16 v5.l, v1.l, v2.l, s3 bitop3:161 +// GFX1250: v_bitop3_b16 v5.l, v1.l, v2.l, s3 bitop3:0xa1 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0x05,0x0e,0x30] -v_bitop3_b16 v5, v255, s2, s105 bitop3:0x27 -// GFX1250: v_bitop3_b16 v5, v255, s2, s105 bitop3:0x27 ; encoding: [0x05,0x04,0x33,0xd6,0xff,0x05,0xa4,0xe1] +v_bitop3_b16 v5.l, v255.l, s2, s105 bitop3:0x27 +// GFX1250: v_bitop3_b16 v5.l, v255.l, s2, s105 bitop3:0x27 ; encoding: [0x05,0x04,0x33,0xd6,0xff,0x05,0xa4,0xe1] -v_bitop3_b16 v5, s1, v255, exec_hi bitop3:100 -// GFX1250: v_bitop3_b16 v5, s1, v255, exec_hi bitop3:0x64 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0xfe,0xff,0x89] +v_bitop3_b16 v5.l, s1, v255.l, exec_hi bitop3:100 +// GFX1250: v_bitop3_b16 v5.l, s1, v255.l, exec_hi bitop3:0x64 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0xfe,0xff,0x89] -v_bitop3_b16 v5, s105, s105, exec_lo bitop3:0 -// GFX1250: v_bitop3_b16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x33,0xd6,0x69,0xd2,0xf8,0x01] +v_bitop3_b16 v5.l, s105, s105, exec_lo bitop3:0 +// GFX1250: v_bitop3_b16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x33,0xd6,0x69,0xd2,0xf8,0x01] -v_bitop3_b16 v5, vcc_lo, ttmp15, v3 bitop3:0x15 -// GFX1250: v_bitop3_b16 v5, vcc_lo, ttmp15, v3 bitop3:0x15 ; encoding: [0x05,0x02,0x33,0xd6,0x6a,0xf6,0x0c,0xa4] +v_bitop3_b16 v5.l, vcc_lo, ttmp15, v3.l bitop3:0x15 +// GFX1250: v_bitop3_b16 v5.l, vcc_lo, ttmp15, v3.l bitop3:0x15 ; encoding: [0x05,0x02,0x33,0xd6,0x6a,0xf6,0x0c,0xa4] -v_bitop3_b16 v5, vcc_hi, 
0xfe0b, v255 bitop3:63 -// GFX1250: v_bitop3_b16 v5, vcc_hi, 0xfe0b, v255 bitop3:0x3f ; encoding: [0x05,0x07,0x33,0xd6,0x6b,0xfe,0xfd,0xe7,0x0b,0xfe,0x00,0x00] +v_bitop3_b16 v5.l, vcc_hi, 0xfe0b, v255.l bitop3:63 +// GFX1250: v_bitop3_b16 v5.l, vcc_hi, 0xfe0b, v255.l bitop3:0x3f ; encoding: [0x05,0x07,0x33,0xd6,0x6b,0xfe,0xfd,0xe7,0x0b,0xfe,0x00,0x00] -v_bitop3_b16 v5, ttmp15, src_scc, ttmp15 bitop3:0x24 -// GFX1250: v_bitop3_b16 v5, ttmp15, src_scc, ttmp15 bitop3:0x24 ; encoding: [0x05,0x04,0x33,0xd6,0x7b,0xfa,0xed,0x81] +v_bitop3_b16 v5.l, ttmp15, src_scc, ttmp15 bitop3:0x24 +// GFX1250: v_bitop3_b16 v5.l, ttmp15, src_scc, ttmp15 bitop3:0x24 ; encoding: [0x05,0x04,0x33,0xd6,0x7b,0xfa,0xed,0x81] -v_bitop3_b16 v5, m0, 0.5, m0 bitop3:5 -// GFX1250: v_bitop3_b16 v5, m0, 0.5, m0 bitop3:5 ; encoding: [0x05,0x00,0x33,0xd6,0x7d,0xe0,0xf5,0xa1] +v_bitop3_b16 v5.l, m0, 0.5, m0 bitop3:5 +// GFX1250: v_bitop3_b16 v5.l, m0, 0.5, m0 bitop3:5 ; encoding: [0x05,0x00,0x33,0xd6,0x7d,0xe0,0xf5,0xa1] -v_bitop3_b16 v5, exec_lo, -1, vcc_hi bitop3:6 -// GFX1250: v_bitop3_b16 v5, exec_lo, -1, vcc_hi bitop3:6 ; encoding: [0x05,0x00,0x33,0xd6,0x7e,0x82,0xad,0xc1] +v_bitop3_b16 v5.l, exec_lo, -1, vcc_hi bitop3:6 +// GFX1250: v_bitop3_b16 v5.l, exec_lo, -1, vcc_hi bitop3:6 ; encoding: [0x05,0x00,0x33,0xd6,0x7e,0x82,0xad,0xc1] v_bitop3_b16 v5.h, exec_hi, null, vcc_lo op_sel:[1,1,1,1] // GFX1250: v_bitop3_b16 v5.h, exec_hi, null, vcc_lo op_sel:[1,1,1,1] ; encoding: [0x05,0x78,0x33,0xd6,0x7f,0xf8,0xa8,0x01] @@ -563,17 +563,17 @@ v_cvt_sr_bf8_f16 v1, v2.l, v3 v_cvt_sr_bf8_f16 v1, v2.h, v3 // GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00] -v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:0 -// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00] +v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:0 +// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00] -v_cvt_sr_bf8_f16 v1, v2, s3 
-// GFX1250: v_cvt_sr_bf8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00] +v_cvt_sr_bf8_f16 v1, v2.l, s3 +// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00] -v_cvt_sr_bf8_f16 v1, v2, 0x1234 -// GFX1250: v_cvt_sr_bf8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00] +v_cvt_sr_bf8_f16 v1, v2.l, 0x1234 +// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00] -v_cvt_sr_bf8_f16 v1, -v2, v3 -// GFX1250: v_cvt_sr_bf8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20] +v_cvt_sr_bf8_f16 v1, -v2.l, v3 +// GFX1250: v_cvt_sr_bf8_f16 v1, -v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20] v_cvt_sr_bf8_f16 v1, |v2.l|, v3 // GFX1250: v_cvt_sr_bf8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00] @@ -605,14 +605,14 @@ v_cvt_sr_fp8_f16 v1, v2.l, v3 v_cvt_sr_fp8_f16 v1, v2.h, v3 // GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00] -v_cvt_sr_fp8_f16 v1, v2, s3 -// GFX1250: v_cvt_sr_fp8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00] +v_cvt_sr_fp8_f16 v1, v2.l, s3 +// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00] -v_cvt_sr_fp8_f16 v1, v2, 0x1234 -// GFX1250: v_cvt_sr_fp8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00] +v_cvt_sr_fp8_f16 v1, v2.l, 0x1234 +// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00] -v_cvt_sr_fp8_f16 v1, -v2, v3 -// GFX1250: v_cvt_sr_fp8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20] +v_cvt_sr_fp8_f16 v1, -v2.l, v3 +// GFX1250: v_cvt_sr_fp8_f16 v1, -v2.l, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20] v_cvt_sr_fp8_f16 v1, |v2.l|, v3 // GFX1250: v_cvt_sr_fp8_f16 v1, |v2.l|, 
v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00] @@ -644,11 +644,11 @@ v_cvt_pk_fp8_f32 v1.l, v2, v3 v_cvt_pk_fp8_f32 v1.h, v2, v3 // GFX1250: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00] -v_cvt_pk_fp8_f32 v1, -v2, |v3| -// GFX1250: v_cvt_pk_fp8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20] +v_cvt_pk_fp8_f32 v1.l, -v2, |v3| +// GFX1250: v_cvt_pk_fp8_f32 v1.l, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20] -v_cvt_pk_fp8_f32 v1, s2, 3 -// GFX1250: v_cvt_pk_fp8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00] +v_cvt_pk_fp8_f32 v1.l, s2, 3 +// GFX1250: v_cvt_pk_fp8_f32 v1.l, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00] v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp // GFX1250: v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00] @@ -656,14 +656,14 @@ v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp v_cvt_pk_fp8_f32 v1.h, v2, v3 clamp // GFX1250: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00] -v_cvt_pk_bf8_f32 v1, v2, v3 -// GFX1250: v_cvt_pk_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00] +v_cvt_pk_bf8_f32 v1.l, v2, v3 +// GFX1250: v_cvt_pk_bf8_f32 v1.l, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00] -v_cvt_pk_bf8_f32 v1, -v2, |v3| -// GFX1250: v_cvt_pk_bf8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20] +v_cvt_pk_bf8_f32 v1.l, -v2, |v3| +// GFX1250: v_cvt_pk_bf8_f32 v1.l, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20] -v_cvt_pk_bf8_f32 v1, s2, 3 -// GFX1250: v_cvt_pk_bf8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00] +v_cvt_pk_bf8_f32 v1.l, s2, 3 +// GFX1250: v_cvt_pk_bf8_f32 v1.l, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00] v_cvt_sr_fp8_f32 v1, v2, v3 // GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00] 
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s index f766e52..fc0ea8b 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s @@ -62,60 +62,60 @@ v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] // GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:161 quad_perm:[0,1,2,3] -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0xa1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:161 quad_perm:[0,1,2,3] +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0xa1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0x27 row_mirror +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0x27 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:100 row_half_mirror -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x64 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x8c,0x01,0x41,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, 
v2.l, v3.l bitop3:100 row_half_mirror +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0x64 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x8c,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, v255 bitop3:0 row_shl:1 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v255.l bitop3:0 row_shl:1 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v255.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x16 row_shl:15 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x16 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x33,0xd6,0xfa,0x04,0xa6,0xc1,0x01,0x0f,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, s105 bitop3:0x16 row_shl:15 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, s105 bitop3:0x16 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x33,0xd6,0xfa,0x04,0xa6,0xc1,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:63 row_shr:1 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x33,0xd6,0xfa,0x04,0xae,0xe1,0x01,0x11,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi bitop3:63 row_shr:1 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x33,0xd6,0xfa,0x04,0xae,0xe1,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo 
bitop3:0x24 row_shr:15 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo bitop3:0x24 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0xaa,0x81,0x01,0x1f,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo bitop3:0x24 row_shr:15 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo bitop3:0x24 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0xaa,0x81,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:5 row_ror:1 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:5 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xee,0xa1,0x01,0x21,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, ttmp15 bitop3:5 row_ror:1 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, ttmp15 bitop3:5 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xee,0xa1,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:6 row_ror:15 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0xc1,0x01,0x2f,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_hi bitop3:6 row_ror:15 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_hi bitop3:6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0xc1,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: 
v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:77 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x4d row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x33,0xd6,0xfa,0x04,0xfa,0xa9,0x01,0x50,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo bitop3:77 row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo bitop3:0x4d row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x33,0xd6,0xfa,0x04,0xfa,0xa9,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:88 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:0x58 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x03,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, null bitop3:88 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, null bitop3:0x58 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x03,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:99 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:0x63 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x06,0x6b,0x01,0x60,0x09,0x13] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, -1 bitop3:99 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, -1 bitop3:0x63 row_xmask:0 row_mask:0x1 bank_mask:0x3 
bound_ctrl:1 ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x06,0x6b,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:101 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:0x65 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x04,0x33,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x05,0x30] +v_bitop3_b16_e64_dpp v255.l, v255.l, v255.l, src_scc bitop3:101 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_bitop3_b16_e64_dpp v255.l, v255.l, v255.l, src_scc bitop3:0x65 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x04,0x33,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf @@ -470,12 +470,12 @@ v_cvt_sr_bf8_f16 v1, v2.h, v3 quad_perm:[0,1,2,3] fi:1 // GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] -// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] +v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] +// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] -// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 
quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] +v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] +// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] @@ -494,12 +494,12 @@ v_cvt_sr_fp8_f16 v1, v2.h, v3 quad_perm:[0,1,2,3] fi:1 // GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] -// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] +v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] +// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] -// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] +v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] +// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_sr_fp8_f16 v1, v2.l, v3 
byte_sel:3 quad_perm:[0,1,2,3] diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s index 8e73ecb..5ac9eb4 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s @@ -46,50 +46,50 @@ v_bfrev_b32_e64 v5, src_scc v_bfrev_b32_e64 v255, 0xaf123456 // GFX1250: v_bfrev_b32_e64 v255, 0xaf123456 ; encoding: [0xff,0x00,0xb8,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf] -v_ceil_f16_e64 v5, v1 -// GFX1250: v_ceil_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xdc,0xd5,0x01,0x01,0x00,0x00] +v_ceil_f16_e64 v5.l, v1.l +// GFX1250: v_ceil_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xdc,0xd5,0x01,0x01,0x00,0x00] -v_ceil_f16_e64 v5, v255 -// GFX1250: v_ceil_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xdc,0xd5,0xff,0x01,0x00,0x00] +v_ceil_f16_e64 v5.l, v255.l +// GFX1250: v_ceil_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xdc,0xd5,0xff,0x01,0x00,0x00] -v_ceil_f16_e64 v5, s1 -// GFX1250: v_ceil_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xdc,0xd5,0x01,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, s1 +// GFX1250: v_ceil_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xdc,0xd5,0x01,0x00,0x00,0x00] -v_ceil_f16_e64 v5, s105 -// GFX1250: v_ceil_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xdc,0xd5,0x69,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, s105 +// GFX1250: v_ceil_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xdc,0xd5,0x69,0x00,0x00,0x00] -v_ceil_f16_e64 v5, vcc_lo -// GFX1250: v_ceil_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xdc,0xd5,0x6a,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, vcc_lo +// GFX1250: v_ceil_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xdc,0xd5,0x6a,0x00,0x00,0x00] -v_ceil_f16_e64 v5, vcc_hi -// GFX1250: v_ceil_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xdc,0xd5,0x6b,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, vcc_hi +// GFX1250: v_ceil_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xdc,0xd5,0x6b,0x00,0x00,0x00] -v_ceil_f16_e64 v5, ttmp15 -// GFX1250: v_ceil_f16_e64 v5, ttmp15 ; encoding: 
[0x05,0x00,0xdc,0xd5,0x7b,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, ttmp15 +// GFX1250: v_ceil_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xdc,0xd5,0x7b,0x00,0x00,0x00] -v_ceil_f16_e64 v5, m0 -// GFX1250: v_ceil_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xdc,0xd5,0x7d,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, m0 +// GFX1250: v_ceil_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xdc,0xd5,0x7d,0x00,0x00,0x00] -v_ceil_f16_e64 v5, exec_lo -// GFX1250: v_ceil_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xdc,0xd5,0x7e,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, exec_lo +// GFX1250: v_ceil_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xdc,0xd5,0x7e,0x00,0x00,0x00] -v_ceil_f16_e64 v5, exec_hi -// GFX1250: v_ceil_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xdc,0xd5,0x7f,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, exec_hi +// GFX1250: v_ceil_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xdc,0xd5,0x7f,0x00,0x00,0x00] -v_ceil_f16_e64 v5, null -// GFX1250: v_ceil_f16_e64 v5, null ; encoding: [0x05,0x00,0xdc,0xd5,0x7c,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, null +// GFX1250: v_ceil_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xdc,0xd5,0x7c,0x00,0x00,0x00] -v_ceil_f16_e64 v5, -1 -// GFX1250: v_ceil_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xdc,0xd5,0xc1,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, -1 +// GFX1250: v_ceil_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xdc,0xd5,0xc1,0x00,0x00,0x00] -v_ceil_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_ceil_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xdc,0xd5,0xf0,0x00,0x00,0x08] +v_ceil_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_ceil_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xdc,0xd5,0xf0,0x00,0x00,0x08] -v_ceil_f16_e64 v5, src_scc mul:4 -// GFX1250: v_ceil_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xdc,0xd5,0xfd,0x00,0x00,0x10] +v_ceil_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_ceil_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xdc,0xd5,0xfd,0x00,0x00,0x10] -v_ceil_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_ceil_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: 
[0xff,0x81,0xdc,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_ceil_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_ceil_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdc,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_ceil_f16 v5.l, v128.l // GFX1250: v_ceil_f16_e64 v5.l, v128.l ; encoding: [0x05,0x00,0xdc,0xd5,0x80,0x01,0x00,0x00] @@ -268,50 +268,50 @@ v_clz_i32_u32_e64 v5, src_scc v_clz_i32_u32_e64 v255, 0xaf123456 // GFX1250: v_clz_i32_u32_e64 v255, 0xaf123456 ; encoding: [0xff,0x00,0xb9,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf] -v_cos_f16_e64 v5, v1 -// GFX1250: v_cos_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xe1,0xd5,0x01,0x01,0x00,0x00] +v_cos_f16_e64 v5.l, v1.l +// GFX1250: v_cos_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe1,0xd5,0x01,0x01,0x00,0x00] -v_cos_f16_e64 v5, v255 -// GFX1250: v_cos_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xe1,0xd5,0xff,0x01,0x00,0x00] +v_cos_f16_e64 v5.l, v255.l +// GFX1250: v_cos_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe1,0xd5,0xff,0x01,0x00,0x00] -v_cos_f16_e64 v5, s1 -// GFX1250: v_cos_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xe1,0xd5,0x01,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, s1 +// GFX1250: v_cos_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe1,0xd5,0x01,0x00,0x00,0x00] -v_cos_f16_e64 v5, s105 -// GFX1250: v_cos_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xe1,0xd5,0x69,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, s105 +// GFX1250: v_cos_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe1,0xd5,0x69,0x00,0x00,0x00] -v_cos_f16_e64 v5, vcc_lo -// GFX1250: v_cos_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe1,0xd5,0x6a,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, vcc_lo +// GFX1250: v_cos_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe1,0xd5,0x6a,0x00,0x00,0x00] -v_cos_f16_e64 v5, vcc_hi -// GFX1250: v_cos_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe1,0xd5,0x6b,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, vcc_hi +// GFX1250: v_cos_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe1,0xd5,0x6b,0x00,0x00,0x00] -v_cos_f16_e64 v5, ttmp15 -// 
GFX1250: v_cos_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe1,0xd5,0x7b,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, ttmp15 +// GFX1250: v_cos_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe1,0xd5,0x7b,0x00,0x00,0x00] -v_cos_f16_e64 v5, m0 -// GFX1250: v_cos_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xe1,0xd5,0x7d,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, m0 +// GFX1250: v_cos_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe1,0xd5,0x7d,0x00,0x00,0x00] -v_cos_f16_e64 v5, exec_lo -// GFX1250: v_cos_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe1,0xd5,0x7e,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, exec_lo +// GFX1250: v_cos_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe1,0xd5,0x7e,0x00,0x00,0x00] -v_cos_f16_e64 v5, exec_hi -// GFX1250: v_cos_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe1,0xd5,0x7f,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, exec_hi +// GFX1250: v_cos_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe1,0xd5,0x7f,0x00,0x00,0x00] -v_cos_f16_e64 v5, null -// GFX1250: v_cos_f16_e64 v5, null ; encoding: [0x05,0x00,0xe1,0xd5,0x7c,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, null +// GFX1250: v_cos_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xe1,0xd5,0x7c,0x00,0x00,0x00] -v_cos_f16_e64 v5, -1 -// GFX1250: v_cos_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xe1,0xd5,0xc1,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, -1 +// GFX1250: v_cos_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe1,0xd5,0xc1,0x00,0x00,0x00] -v_cos_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_cos_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xe1,0xd5,0xf0,0x00,0x00,0x08] +v_cos_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_cos_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xe1,0xd5,0xf0,0x00,0x00,0x08] -v_cos_f16_e64 v5, src_scc mul:4 -// GFX1250: v_cos_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xe1,0xd5,0xfd,0x00,0x00,0x10] +v_cos_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_cos_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xe1,0xd5,0xfd,0x00,0x00,0x10] -v_cos_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_cos_f16_e64 v255, -|0xfe0b| clamp div:2 ; 
encoding: [0xff,0x81,0xe1,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_cos_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_cos_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xe1,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_cos_f16 v5.l, v128.l // GFX1250: v_cos_f16_e64 v5.l, v128.l ; encoding: [0x05,0x00,0xe1,0xd5,0x80,0x01,0x00,0x00] @@ -502,11 +502,11 @@ v_cvt_pk_f32_bf8_e64 v[2:3], 3 v_cvt_pk_f32_bf8_e64 v[2:3], 3 op_sel:[1,0] // GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], 3 op_sel:[1,0] ; encoding: [0x02,0x08,0xef,0xd5,0x83,0x00,0x00,0x00] -v_cvt_pk_f32_bf8_e64 v[2:3], v3 -// GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v3 ; encoding: [0x02,0x00,0xef,0xd5,0x03,0x01,0x00,0x00] +v_cvt_pk_f32_bf8_e64 v[2:3], v3.l +// GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v3.l ; encoding: [0x02,0x00,0xef,0xd5,0x03,0x01,0x00,0x00] -v_cvt_pk_f32_bf8_e64 v[2:3], v3 op_sel:[1,0] -// GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v3 op_sel:[1,0] ; encoding: [0x02,0x08,0xef,0xd5,0x03,0x01,0x00,0x00] +v_cvt_pk_f32_bf8_e64 v[2:3], v3.h op_sel:[1,0] +// GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v3.h op_sel:[1,0] ; encoding: [0x02,0x08,0xef,0xd5,0x03,0x01,0x00,0x00] v_cvt_pk_f32_bf8 v[2:3], v128.h // GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v128.h op_sel:[1,0] ; encoding: [0x02,0x08,0xef,0xd5,0x80,0x01,0x00,0x00] @@ -526,11 +526,11 @@ v_cvt_pk_f32_fp8_e64 v[2:3], 3 v_cvt_pk_f32_fp8_e64 v[2:3], 3 op_sel:[1,0] // GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], 3 op_sel:[1,0] ; encoding: [0x02,0x08,0xee,0xd5,0x83,0x00,0x00,0x00] -v_cvt_pk_f32_fp8_e64 v[2:3], v3 -// GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], v3 ; encoding: [0x02,0x00,0xee,0xd5,0x03,0x01,0x00,0x00] +v_cvt_pk_f32_fp8_e64 v[2:3], v3.l +// GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], v3.l ; encoding: [0x02,0x00,0xee,0xd5,0x03,0x01,0x00,0x00] -v_cvt_pk_f32_fp8_e64 v[2:3], v3 op_sel:[1,0] -// GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], v3 op_sel:[1,0] ; encoding: [0x02,0x08,0xee,0xd5,0x03,0x01,0x00,0x00] +v_cvt_pk_f32_fp8_e64 v[2:3], v3.h op_sel:[1,0] +// GFX1250: 
v_cvt_pk_f32_fp8_e64 v[2:3], v3.h op_sel:[1,0] ; encoding: [0x02,0x08,0xee,0xd5,0x03,0x01,0x00,0x00] v_cvt_pk_f32_fp8 v[2:3], v128.h // GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], v128.h op_sel:[1,0] ; encoding: [0x02,0x08,0xee,0xd5,0x80,0x01,0x00,0x00] @@ -568,50 +568,50 @@ v_cvt_pk_f32_fp8_e64 v[4:5], v3 v_cvt_pk_f32_fp8_e64 v[4:5], v3 op_sel:[1,0] // GFX1250: v_cvt_pk_f32_fp8_e64 v[4:5], v3 op_sel:[1,0] ; encoding: [0x04,0x08,0xee,0xd5,0x03,0x01,0x00,0x00] -v_cvt_f16_f32_e64 v5, v1 -// GFX1250: v_cvt_f16_f32_e64 v5, v1 ; encoding: [0x05,0x00,0x8a,0xd5,0x01,0x01,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, v1 +// GFX1250: v_cvt_f16_f32_e64 v5.l, v1 ; encoding: [0x05,0x00,0x8a,0xd5,0x01,0x01,0x00,0x00] -v_cvt_f16_f32_e64 v5, v255 -// GFX1250: v_cvt_f16_f32_e64 v5, v255 ; encoding: [0x05,0x00,0x8a,0xd5,0xff,0x01,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, v255 +// GFX1250: v_cvt_f16_f32_e64 v5.l, v255 ; encoding: [0x05,0x00,0x8a,0xd5,0xff,0x01,0x00,0x00] -v_cvt_f16_f32_e64 v5, s1 -// GFX1250: v_cvt_f16_f32_e64 v5, s1 ; encoding: [0x05,0x00,0x8a,0xd5,0x01,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, s1 +// GFX1250: v_cvt_f16_f32_e64 v5.l, s1 ; encoding: [0x05,0x00,0x8a,0xd5,0x01,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, s105 -// GFX1250: v_cvt_f16_f32_e64 v5, s105 ; encoding: [0x05,0x00,0x8a,0xd5,0x69,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, s105 +// GFX1250: v_cvt_f16_f32_e64 v5.l, s105 ; encoding: [0x05,0x00,0x8a,0xd5,0x69,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, vcc_lo -// GFX1250: v_cvt_f16_f32_e64 v5, vcc_lo ; encoding: [0x05,0x00,0x8a,0xd5,0x6a,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, vcc_lo +// GFX1250: v_cvt_f16_f32_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0x8a,0xd5,0x6a,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, vcc_hi -// GFX1250: v_cvt_f16_f32_e64 v5, vcc_hi ; encoding: [0x05,0x00,0x8a,0xd5,0x6b,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, vcc_hi +// GFX1250: v_cvt_f16_f32_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0x8a,0xd5,0x6b,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, ttmp15 -// GFX1250: 
v_cvt_f16_f32_e64 v5, ttmp15 ; encoding: [0x05,0x00,0x8a,0xd5,0x7b,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, ttmp15 +// GFX1250: v_cvt_f16_f32_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0x8a,0xd5,0x7b,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, m0 -// GFX1250: v_cvt_f16_f32_e64 v5, m0 ; encoding: [0x05,0x00,0x8a,0xd5,0x7d,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, m0 +// GFX1250: v_cvt_f16_f32_e64 v5.l, m0 ; encoding: [0x05,0x00,0x8a,0xd5,0x7d,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, exec_lo -// GFX1250: v_cvt_f16_f32_e64 v5, exec_lo ; encoding: [0x05,0x00,0x8a,0xd5,0x7e,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, exec_lo +// GFX1250: v_cvt_f16_f32_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0x8a,0xd5,0x7e,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, exec_hi -// GFX1250: v_cvt_f16_f32_e64 v5, exec_hi ; encoding: [0x05,0x00,0x8a,0xd5,0x7f,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, exec_hi +// GFX1250: v_cvt_f16_f32_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0x8a,0xd5,0x7f,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, null -// GFX1250: v_cvt_f16_f32_e64 v5, null ; encoding: [0x05,0x00,0x8a,0xd5,0x7c,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, null +// GFX1250: v_cvt_f16_f32_e64 v5.l, null ; encoding: [0x05,0x00,0x8a,0xd5,0x7c,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, -1 -// GFX1250: v_cvt_f16_f32_e64 v5, -1 ; encoding: [0x05,0x00,0x8a,0xd5,0xc1,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, -1 +// GFX1250: v_cvt_f16_f32_e64 v5.l, -1 ; encoding: [0x05,0x00,0x8a,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, 0.5 mul:2 -// GFX1250: v_cvt_f16_f32_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0x8a,0xd5,0xf0,0x00,0x00,0x08] +v_cvt_f16_f32_e64 v5.l, 0.5 mul:2 +// GFX1250: v_cvt_f16_f32_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0x8a,0xd5,0xf0,0x00,0x00,0x08] -v_cvt_f16_f32_e64 v5, src_scc mul:4 -// GFX1250: v_cvt_f16_f32_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0x8a,0xd5,0xfd,0x00,0x00,0x10] +v_cvt_f16_f32_e64 v5.l, src_scc mul:4 +// GFX1250: v_cvt_f16_f32_e64 v5.l, src_scc mul:4 ; encoding: 
[0x05,0x00,0x8a,0xd5,0xfd,0x00,0x00,0x10] -v_cvt_f16_f32_e64 v255, -|0xaf123456| clamp div:2 -// GFX1250: v_cvt_f16_f32_e64 v255, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0x8a,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf] +v_cvt_f16_f32_e64 v255.l, -|0xaf123456| clamp div:2 +// GFX1250: v_cvt_f16_f32_e64 v255.l, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0x8a,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf] v_cvt_f16_f32 v128.l, v15 // GFX1250: v_cvt_f16_f32_e64 v128.l, v15 ; encoding: [0x80,0x00,0x8a,0xd5,0x0f,0x01,0x00,0x00] @@ -619,50 +619,50 @@ v_cvt_f16_f32 v128.l, v15 v_cvt_f16_f32 v128.h, v15 // GFX1250: v_cvt_f16_f32_e64 v128.h, v15 op_sel:[0,1] ; encoding: [0x80,0x40,0x8a,0xd5,0x0f,0x01,0x00,0x00] -v_cvt_f16_i16_e64 v5, v1 -// GFX1250: v_cvt_f16_i16_e64 v5, v1 ; encoding: [0x05,0x00,0xd1,0xd5,0x01,0x01,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, v1.l +// GFX1250: v_cvt_f16_i16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd1,0xd5,0x01,0x01,0x00,0x00] -v_cvt_f16_i16_e64 v5, v255 -// GFX1250: v_cvt_f16_i16_e64 v5, v255 ; encoding: [0x05,0x00,0xd1,0xd5,0xff,0x01,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, v255.l +// GFX1250: v_cvt_f16_i16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd1,0xd5,0xff,0x01,0x00,0x00] -v_cvt_f16_i16_e64 v5, s1 -// GFX1250: v_cvt_f16_i16_e64 v5, s1 ; encoding: [0x05,0x00,0xd1,0xd5,0x01,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, s1 +// GFX1250: v_cvt_f16_i16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd1,0xd5,0x01,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, s105 -// GFX1250: v_cvt_f16_i16_e64 v5, s105 ; encoding: [0x05,0x00,0xd1,0xd5,0x69,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, s105 +// GFX1250: v_cvt_f16_i16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd1,0xd5,0x69,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, vcc_lo -// GFX1250: v_cvt_f16_i16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd1,0xd5,0x6a,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, vcc_lo +// GFX1250: v_cvt_f16_i16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd1,0xd5,0x6a,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, vcc_hi 
-// GFX1250: v_cvt_f16_i16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd1,0xd5,0x6b,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, vcc_hi +// GFX1250: v_cvt_f16_i16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd1,0xd5,0x6b,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, ttmp15 -// GFX1250: v_cvt_f16_i16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd1,0xd5,0x7b,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, ttmp15 +// GFX1250: v_cvt_f16_i16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd1,0xd5,0x7b,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, m0 -// GFX1250: v_cvt_f16_i16_e64 v5, m0 ; encoding: [0x05,0x00,0xd1,0xd5,0x7d,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, m0 +// GFX1250: v_cvt_f16_i16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd1,0xd5,0x7d,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, exec_lo -// GFX1250: v_cvt_f16_i16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd1,0xd5,0x7e,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, exec_lo +// GFX1250: v_cvt_f16_i16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd1,0xd5,0x7e,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, exec_hi -// GFX1250: v_cvt_f16_i16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd1,0xd5,0x7f,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, exec_hi +// GFX1250: v_cvt_f16_i16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd1,0xd5,0x7f,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, null -// GFX1250: v_cvt_f16_i16_e64 v5, null ; encoding: [0x05,0x00,0xd1,0xd5,0x7c,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, null +// GFX1250: v_cvt_f16_i16_e64 v5.l, null ; encoding: [0x05,0x00,0xd1,0xd5,0x7c,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, -1 -// GFX1250: v_cvt_f16_i16_e64 v5, -1 ; encoding: [0x05,0x00,0xd1,0xd5,0xc1,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, -1 +// GFX1250: v_cvt_f16_i16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd1,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, 0.5 mul:2 -// GFX1250: v_cvt_f16_i16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd1,0xd5,0xf0,0x00,0x00,0x08] +v_cvt_f16_i16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_cvt_f16_i16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd1,0xd5,0xf0,0x00,0x00,0x08] 
-v_cvt_f16_i16_e64 v5, src_scc mul:4 -// GFX1250: v_cvt_f16_i16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd1,0xd5,0xfd,0x00,0x00,0x10] +v_cvt_f16_i16_e64 v5.l, src_scc mul:4 +// GFX1250: v_cvt_f16_i16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd1,0xd5,0xfd,0x00,0x00,0x10] -v_cvt_f16_i16_e64 v255, 0xfe0b clamp div:2 -// GFX1250: v_cvt_f16_i16_e64 v255, 0xfe0b clamp div:2 ; encoding: [0xff,0x80,0xd1,0xd5,0xff,0x00,0x00,0x18,0x0b,0xfe,0x00,0x00] +v_cvt_f16_i16_e64 v255.l, 0xfe0b clamp div:2 +// GFX1250: v_cvt_f16_i16_e64 v255.l, 0xfe0b clamp div:2 ; encoding: [0xff,0x80,0xd1,0xd5,0xff,0x00,0x00,0x18,0x0b,0xfe,0x00,0x00] v_cvt_f16_i16 v128.l, v15.l // GFX1250: v_cvt_f16_i16_e64 v128.l, v15.l ; encoding: [0x80,0x00,0xd1,0xd5,0x0f,0x01,0x00,0x00] @@ -670,50 +670,50 @@ v_cvt_f16_i16 v128.l, v15.l v_cvt_f16_i16 v128.h, v15.h // GFX1250: v_cvt_f16_i16_e64 v128.h, v15.h op_sel:[1,1] ; encoding: [0x80,0x48,0xd1,0xd5,0x0f,0x01,0x00,0x00] -v_cvt_f16_u16_e64 v5, v1 -// GFX1250: v_cvt_f16_u16_e64 v5, v1 ; encoding: [0x05,0x00,0xd0,0xd5,0x01,0x01,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, v1.l +// GFX1250: v_cvt_f16_u16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd0,0xd5,0x01,0x01,0x00,0x00] -v_cvt_f16_u16_e64 v5, v255 -// GFX1250: v_cvt_f16_u16_e64 v5, v255 ; encoding: [0x05,0x00,0xd0,0xd5,0xff,0x01,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, v255.l +// GFX1250: v_cvt_f16_u16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd0,0xd5,0xff,0x01,0x00,0x00] -v_cvt_f16_u16_e64 v5, s1 -// GFX1250: v_cvt_f16_u16_e64 v5, s1 ; encoding: [0x05,0x00,0xd0,0xd5,0x01,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, s1 +// GFX1250: v_cvt_f16_u16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd0,0xd5,0x01,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, s105 -// GFX1250: v_cvt_f16_u16_e64 v5, s105 ; encoding: [0x05,0x00,0xd0,0xd5,0x69,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, s105 +// GFX1250: v_cvt_f16_u16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd0,0xd5,0x69,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, vcc_lo -// GFX1250: v_cvt_f16_u16_e64 v5, 
vcc_lo ; encoding: [0x05,0x00,0xd0,0xd5,0x6a,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, vcc_lo +// GFX1250: v_cvt_f16_u16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd0,0xd5,0x6a,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, vcc_hi -// GFX1250: v_cvt_f16_u16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd0,0xd5,0x6b,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, vcc_hi +// GFX1250: v_cvt_f16_u16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd0,0xd5,0x6b,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, ttmp15 -// GFX1250: v_cvt_f16_u16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd0,0xd5,0x7b,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, ttmp15 +// GFX1250: v_cvt_f16_u16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd0,0xd5,0x7b,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, m0 -// GFX1250: v_cvt_f16_u16_e64 v5, m0 ; encoding: [0x05,0x00,0xd0,0xd5,0x7d,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, m0 +// GFX1250: v_cvt_f16_u16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd0,0xd5,0x7d,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, exec_lo -// GFX1250: v_cvt_f16_u16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd0,0xd5,0x7e,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, exec_lo +// GFX1250: v_cvt_f16_u16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd0,0xd5,0x7e,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, exec_hi -// GFX1250: v_cvt_f16_u16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd0,0xd5,0x7f,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, exec_hi +// GFX1250: v_cvt_f16_u16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd0,0xd5,0x7f,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, null -// GFX1250: v_cvt_f16_u16_e64 v5, null ; encoding: [0x05,0x00,0xd0,0xd5,0x7c,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, null +// GFX1250: v_cvt_f16_u16_e64 v5.l, null ; encoding: [0x05,0x00,0xd0,0xd5,0x7c,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, -1 -// GFX1250: v_cvt_f16_u16_e64 v5, -1 ; encoding: [0x05,0x00,0xd0,0xd5,0xc1,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, -1 +// GFX1250: v_cvt_f16_u16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd0,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, 0.5 mul:2 -// GFX1250: 
v_cvt_f16_u16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd0,0xd5,0xf0,0x00,0x00,0x08] +v_cvt_f16_u16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_cvt_f16_u16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd0,0xd5,0xf0,0x00,0x00,0x08] -v_cvt_f16_u16_e64 v5, src_scc mul:4 -// GFX1250: v_cvt_f16_u16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd0,0xd5,0xfd,0x00,0x00,0x10] +v_cvt_f16_u16_e64 v5.l, src_scc mul:4 +// GFX1250: v_cvt_f16_u16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd0,0xd5,0xfd,0x00,0x00,0x10] -v_cvt_f16_u16_e64 v255, 0xfe0b clamp div:2 -// GFX1250: v_cvt_f16_u16_e64 v255, 0xfe0b clamp div:2 ; encoding: [0xff,0x80,0xd0,0xd5,0xff,0x00,0x00,0x18,0x0b,0xfe,0x00,0x00] +v_cvt_f16_u16_e64 v255.l, 0xfe0b clamp div:2 +// GFX1250: v_cvt_f16_u16_e64 v255.l, 0xfe0b clamp div:2 ; encoding: [0xff,0x80,0xd0,0xd5,0xff,0x00,0x00,0x18,0x0b,0xfe,0x00,0x00] v_cvt_f16_u16 v128.l, v15.l // GFX1250: v_cvt_f16_u16_e64 v128.l, v15.l ; encoding: [0x80,0x00,0xd0,0xd5,0x0f,0x01,0x00,0x00] @@ -721,11 +721,11 @@ v_cvt_f16_u16 v128.l, v15.l v_cvt_f16_u16 v128.h, v15.h // GFX1250: v_cvt_f16_u16_e64 v128.h, v15.h op_sel:[1,1] ; encoding: [0x80,0x48,0xd0,0xd5,0x0f,0x01,0x00,0x00] -v_cvt_f32_f16_e64 v5, v1 -// GFX1250: v_cvt_f32_f16_e64 v5, v1 ; encoding: [0x05,0x00,0x8b,0xd5,0x01,0x01,0x00,0x00] +v_cvt_f32_f16_e64 v5, v1.l +// GFX1250: v_cvt_f32_f16_e64 v5, v1.l ; encoding: [0x05,0x00,0x8b,0xd5,0x01,0x01,0x00,0x00] -v_cvt_f32_f16_e64 v5, v255 -// GFX1250: v_cvt_f32_f16_e64 v5, v255 ; encoding: [0x05,0x00,0x8b,0xd5,0xff,0x01,0x00,0x00] +v_cvt_f32_f16_e64 v5, v255.l +// GFX1250: v_cvt_f32_f16_e64 v5, v255.l ; encoding: [0x05,0x00,0x8b,0xd5,0xff,0x01,0x00,0x00] v_cvt_f32_f16_e64 v5, s1 // GFX1250: v_cvt_f32_f16_e64 v5, s1 ; encoding: [0x05,0x00,0x8b,0xd5,0x01,0x00,0x00,0x00] @@ -1303,50 +1303,50 @@ v_cvt_flr_i32_f32_e64 v5, src_scc v_cvt_flr_i32_f32_e64 v255, -|0xaf123456| // GFX1250: v_cvt_floor_i32_f32_e64 v255, -|0xaf123456| ; encoding: 
[0xff,0x01,0x8d,0xd5,0xff,0x00,0x00,0x20,0x56,0x34,0x12,0xaf] -v_cvt_i16_f16_e64 v5, v1 -// GFX1250: v_cvt_i16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd3,0xd5,0x01,0x01,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, v1.l +// GFX1250: v_cvt_i16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd3,0xd5,0x01,0x01,0x00,0x00] -v_cvt_i16_f16_e64 v5, v255 -// GFX1250: v_cvt_i16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd3,0xd5,0xff,0x01,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, v255.l +// GFX1250: v_cvt_i16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd3,0xd5,0xff,0x01,0x00,0x00] -v_cvt_i16_f16_e64 v5, s1 -// GFX1250: v_cvt_i16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd3,0xd5,0x01,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, s1 +// GFX1250: v_cvt_i16_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd3,0xd5,0x01,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, s105 -// GFX1250: v_cvt_i16_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd3,0xd5,0x69,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, s105 +// GFX1250: v_cvt_i16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd3,0xd5,0x69,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, vcc_lo -// GFX1250: v_cvt_i16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd3,0xd5,0x6a,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, vcc_lo +// GFX1250: v_cvt_i16_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd3,0xd5,0x6a,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, vcc_hi -// GFX1250: v_cvt_i16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd3,0xd5,0x6b,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, vcc_hi +// GFX1250: v_cvt_i16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd3,0xd5,0x6b,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, ttmp15 -// GFX1250: v_cvt_i16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd3,0xd5,0x7b,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, ttmp15 +// GFX1250: v_cvt_i16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd3,0xd5,0x7b,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, m0 -// GFX1250: v_cvt_i16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd3,0xd5,0x7d,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, m0 +// GFX1250: v_cvt_i16_f16_e64 v5.l, m0 
; encoding: [0x05,0x00,0xd3,0xd5,0x7d,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, exec_lo -// GFX1250: v_cvt_i16_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd3,0xd5,0x7e,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, exec_lo +// GFX1250: v_cvt_i16_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd3,0xd5,0x7e,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, exec_hi -// GFX1250: v_cvt_i16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd3,0xd5,0x7f,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, exec_hi +// GFX1250: v_cvt_i16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd3,0xd5,0x7f,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, null -// GFX1250: v_cvt_i16_f16_e64 v5, null ; encoding: [0x05,0x00,0xd3,0xd5,0x7c,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, null +// GFX1250: v_cvt_i16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd3,0xd5,0x7c,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, -1 -// GFX1250: v_cvt_i16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd3,0xd5,0xc1,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, -1 +// GFX1250: v_cvt_i16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd3,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, 0.5 -// GFX1250: v_cvt_i16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xd3,0xd5,0xf0,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, 0.5 +// GFX1250: v_cvt_i16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xd3,0xd5,0xf0,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, src_scc -// GFX1250: v_cvt_i16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xd3,0xd5,0xfd,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, src_scc +// GFX1250: v_cvt_i16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xd3,0xd5,0xfd,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v255, -|0xfe0b| clamp -// GFX1250: v_cvt_i16_f16_e64 v255, -|0xfe0b| clamp ; encoding: [0xff,0x81,0xd3,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] +v_cvt_i16_f16_e64 v255.l, -|0xfe0b| clamp +// GFX1250: v_cvt_i16_f16_e64 v255.l, -|0xfe0b| clamp ; encoding: [0xff,0x81,0xd3,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] v_cvt_i16_f16 v1.l, v128.l // GFX1250: v_cvt_i16_f16_e64 v1.l, v128.l ; encoding: 
[0x01,0x00,0xd3,0xd5,0x80,0x01,0x00,0x00] @@ -1435,11 +1435,11 @@ v_cvt_i32_f64_e64 v5, -|src_scc| v_cvt_i32_f64_e64 v255, 0xaf123456 clamp // GFX1250: v_cvt_i32_f64_e64 v255, 0xaf123456 clamp ; encoding: [0xff,0x80,0x83,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf] -v_cvt_i32_i16_e64 v5, v1 -// GFX1250: v_cvt_i32_i16_e64 v5, v1 ; encoding: [0x05,0x00,0xea,0xd5,0x01,0x01,0x00,0x00] +v_cvt_i32_i16_e64 v5, v1.l +// GFX1250: v_cvt_i32_i16_e64 v5, v1.l ; encoding: [0x05,0x00,0xea,0xd5,0x01,0x01,0x00,0x00] -v_cvt_i32_i16_e64 v5, v255 -// GFX1250: v_cvt_i32_i16_e64 v5, v255 ; encoding: [0x05,0x00,0xea,0xd5,0xff,0x01,0x00,0x00] +v_cvt_i32_i16_e64 v5, v255.l +// GFX1250: v_cvt_i32_i16_e64 v5, v255.l ; encoding: [0x05,0x00,0xea,0xd5,0xff,0x01,0x00,0x00] v_cvt_i32_i16_e64 v5, s1 // GFX1250: v_cvt_i32_i16_e64 v5, s1 ; encoding: [0x05,0x00,0xea,0xd5,0x01,0x00,0x00,0x00] @@ -1531,50 +1531,50 @@ v_cvt_nearest_i32_f32_e64 v5, src_scc v_cvt_nearest_i32_f32_e64 v255, -|0xaf123456| // GFX1250: v_cvt_nearest_i32_f32_e64 v255, -|0xaf123456| ; encoding: [0xff,0x01,0x8c,0xd5,0xff,0x00,0x00,0x20,0x56,0x34,0x12,0xaf] -v_cvt_norm_i16_f16_e64 v5, v1 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xe3,0xd5,0x01,0x01,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, v1.l +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe3,0xd5,0x01,0x01,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, v255 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xe3,0xd5,0xff,0x01,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, v255.l +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe3,0xd5,0xff,0x01,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, s1 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xe3,0xd5,0x01,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, s1 +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe3,0xd5,0x01,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, s105 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, s105 ; encoding: 
[0x05,0x00,0xe3,0xd5,0x69,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, s105 +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe3,0xd5,0x69,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, vcc_lo -// GFX1250: v_cvt_norm_i16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe3,0xd5,0x6a,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, vcc_lo +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe3,0xd5,0x6a,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, vcc_hi -// GFX1250: v_cvt_norm_i16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe3,0xd5,0x6b,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, vcc_hi +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe3,0xd5,0x6b,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, ttmp15 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe3,0xd5,0x7b,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, ttmp15 +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe3,0xd5,0x7b,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, m0 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xe3,0xd5,0x7d,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, m0 +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe3,0xd5,0x7d,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, exec_lo -// GFX1250: v_cvt_norm_i16_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe3,0xd5,0x7e,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, exec_lo +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe3,0xd5,0x7e,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, exec_hi -// GFX1250: v_cvt_norm_i16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe3,0xd5,0x7f,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, exec_hi +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe3,0xd5,0x7f,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, null -// GFX1250: v_cvt_norm_i16_f16_e64 v5, null ; encoding: [0x05,0x00,0xe3,0xd5,0x7c,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, null +// 
GFX1250: v_cvt_norm_i16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xe3,0xd5,0x7c,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, -1 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xe3,0xd5,0xc1,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, -1 +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe3,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, 0.5 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xe3,0xd5,0xf0,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, 0.5 +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xe3,0xd5,0xf0,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, src_scc -// GFX1250: v_cvt_norm_i16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xe3,0xd5,0xfd,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, src_scc +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xe3,0xd5,0xfd,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v255, -|0xfe0b| -// GFX1250: v_cvt_norm_i16_f16_e64 v255, -|0xfe0b| ; encoding: [0xff,0x01,0xe3,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] +v_cvt_norm_i16_f16_e64 v255.l, -|0xfe0b| +// GFX1250: v_cvt_norm_i16_f16_e64 v255.l, -|0xfe0b| ; encoding: [0xff,0x01,0xe3,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] v_cvt_norm_i16_f16 v1.l, v128.l // GFX1250: v_cvt_norm_i16_f16_e64 v1.l, v128.l ; encoding: [0x01,0x00,0xe3,0xd5,0x80,0x01,0x00,0x00] @@ -1582,50 +1582,50 @@ v_cvt_norm_i16_f16 v1.l, v128.l v_cvt_norm_i16_f16 v1.l, v128.h // GFX1250: v_cvt_norm_i16_f16_e64 v1.l, v128.h op_sel:[1,0] ; encoding: [0x01,0x08,0xe3,0xd5,0x80,0x01,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, v1 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xe4,0xd5,0x01,0x01,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, v1.l +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe4,0xd5,0x01,0x01,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, v255 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xe4,0xd5,0xff,0x01,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, 
v255.l +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe4,0xd5,0xff,0x01,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, s1 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xe4,0xd5,0x01,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, s1 +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe4,0xd5,0x01,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, s105 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xe4,0xd5,0x69,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, s105 +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe4,0xd5,0x69,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, vcc_lo -// GFX1250: v_cvt_norm_u16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe4,0xd5,0x6a,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, vcc_lo +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe4,0xd5,0x6a,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, vcc_hi -// GFX1250: v_cvt_norm_u16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe4,0xd5,0x6b,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, vcc_hi +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe4,0xd5,0x6b,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, ttmp15 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe4,0xd5,0x7b,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, ttmp15 +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe4,0xd5,0x7b,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, m0 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xe4,0xd5,0x7d,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, m0 +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe4,0xd5,0x7d,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, exec_lo -// GFX1250: v_cvt_norm_u16_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe4,0xd5,0x7e,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, exec_lo +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, exec_lo ; encoding: 
[0x05,0x00,0xe4,0xd5,0x7e,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, exec_hi -// GFX1250: v_cvt_norm_u16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe4,0xd5,0x7f,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, exec_hi +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe4,0xd5,0x7f,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, null -// GFX1250: v_cvt_norm_u16_f16_e64 v5, null ; encoding: [0x05,0x00,0xe4,0xd5,0x7c,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, null +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xe4,0xd5,0x7c,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, -1 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xe4,0xd5,0xc1,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, -1 +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe4,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, 0.5 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xe4,0xd5,0xf0,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, 0.5 +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xe4,0xd5,0xf0,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, src_scc -// GFX1250: v_cvt_norm_u16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xe4,0xd5,0xfd,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, src_scc +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xe4,0xd5,0xfd,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v255, -|0xfe0b| -// GFX1250: v_cvt_norm_u16_f16_e64 v255, -|0xfe0b| ; encoding: [0xff,0x01,0xe4,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] +v_cvt_norm_u16_f16_e64 v255.l, -|0xfe0b| +// GFX1250: v_cvt_norm_u16_f16_e64 v255.l, -|0xfe0b| ; encoding: [0xff,0x01,0xe4,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] v_cvt_norm_u16_f16 v1.l, v128.l // GFX1250: v_cvt_norm_u16_f16_e64 v1.l, v128.l ; encoding: [0x01,0x00,0xe4,0xd5,0x80,0x01,0x00,0x00] @@ -1723,50 +1723,50 @@ v_cvt_rpi_i32_f32_e64 v5, src_scc v_cvt_rpi_i32_f32_e64 v255, -|0xaf123456| // GFX1250: 
v_cvt_nearest_i32_f32_e64 v255, -|0xaf123456| ; encoding: [0xff,0x01,0x8c,0xd5,0xff,0x00,0x00,0x20,0x56,0x34,0x12,0xaf] -v_cvt_u16_f16_e64 v5, v1 -// GFX1250: v_cvt_u16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd2,0xd5,0x01,0x01,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, v1.l +// GFX1250: v_cvt_u16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd2,0xd5,0x01,0x01,0x00,0x00] -v_cvt_u16_f16_e64 v5, v255 -// GFX1250: v_cvt_u16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd2,0xd5,0xff,0x01,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, v255.l +// GFX1250: v_cvt_u16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd2,0xd5,0xff,0x01,0x00,0x00] -v_cvt_u16_f16_e64 v5, s1 -// GFX1250: v_cvt_u16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd2,0xd5,0x01,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, s1 +// GFX1250: v_cvt_u16_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd2,0xd5,0x01,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, s105 -// GFX1250: v_cvt_u16_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd2,0xd5,0x69,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, s105 +// GFX1250: v_cvt_u16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd2,0xd5,0x69,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, vcc_lo -// GFX1250: v_cvt_u16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd2,0xd5,0x6a,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, vcc_lo +// GFX1250: v_cvt_u16_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd2,0xd5,0x6a,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, vcc_hi -// GFX1250: v_cvt_u16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd2,0xd5,0x6b,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, vcc_hi +// GFX1250: v_cvt_u16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd2,0xd5,0x6b,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, ttmp15 -// GFX1250: v_cvt_u16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd2,0xd5,0x7b,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, ttmp15 +// GFX1250: v_cvt_u16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd2,0xd5,0x7b,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, m0 -// GFX1250: v_cvt_u16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd2,0xd5,0x7d,0x00,0x00,0x00] 
+v_cvt_u16_f16_e64 v5.l, m0 +// GFX1250: v_cvt_u16_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd2,0xd5,0x7d,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, exec_lo -// GFX1250: v_cvt_u16_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd2,0xd5,0x7e,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, exec_lo +// GFX1250: v_cvt_u16_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd2,0xd5,0x7e,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, exec_hi -// GFX1250: v_cvt_u16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd2,0xd5,0x7f,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, exec_hi +// GFX1250: v_cvt_u16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd2,0xd5,0x7f,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, null -// GFX1250: v_cvt_u16_f16_e64 v5, null ; encoding: [0x05,0x00,0xd2,0xd5,0x7c,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, null +// GFX1250: v_cvt_u16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd2,0xd5,0x7c,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, -1 -// GFX1250: v_cvt_u16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd2,0xd5,0xc1,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, -1 +// GFX1250: v_cvt_u16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd2,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, 0.5 -// GFX1250: v_cvt_u16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xd2,0xd5,0xf0,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, 0.5 +// GFX1250: v_cvt_u16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xd2,0xd5,0xf0,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, src_scc -// GFX1250: v_cvt_u16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xd2,0xd5,0xfd,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, src_scc +// GFX1250: v_cvt_u16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xd2,0xd5,0xfd,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v255, -|0xfe0b| clamp -// GFX1250: v_cvt_u16_f16_e64 v255, -|0xfe0b| clamp ; encoding: [0xff,0x81,0xd2,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] +v_cvt_u16_f16_e64 v255.l, -|0xfe0b| clamp +// GFX1250: v_cvt_u16_f16_e64 v255.l, -|0xfe0b| clamp ; encoding: [0xff,0x81,0xd2,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] v_cvt_u16_f16 v1.l, 
v128.l // GFX1250: v_cvt_u16_f16_e64 v1.l, v128.l ; encoding: [0x01,0x00,0xd2,0xd5,0x80,0x01,0x00,0x00] @@ -1855,11 +1855,11 @@ v_cvt_u32_f64_e64 v5, -|src_scc| v_cvt_u32_f64_e64 v255, 0xaf123456 clamp // GFX1250: v_cvt_u32_f64_e64 v255, 0xaf123456 clamp ; encoding: [0xff,0x80,0x95,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf] -v_cvt_u32_u16_e64 v5, v1 -// GFX1250: v_cvt_u32_u16_e64 v5, v1 ; encoding: [0x05,0x00,0xeb,0xd5,0x01,0x01,0x00,0x00] +v_cvt_u32_u16_e64 v5, v1.l +// GFX1250: v_cvt_u32_u16_e64 v5, v1.l ; encoding: [0x05,0x00,0xeb,0xd5,0x01,0x01,0x00,0x00] -v_cvt_u32_u16_e64 v5, v255 -// GFX1250: v_cvt_u32_u16_e64 v5, v255 ; encoding: [0x05,0x00,0xeb,0xd5,0xff,0x01,0x00,0x00] +v_cvt_u32_u16_e64 v5, v255.l +// GFX1250: v_cvt_u32_u16_e64 v5, v255.l ; encoding: [0x05,0x00,0xeb,0xd5,0xff,0x01,0x00,0x00] v_cvt_u32_u16_e64 v5, s1 // GFX1250: v_cvt_u32_u16_e64 v5, s1 ; encoding: [0x05,0x00,0xeb,0xd5,0x01,0x00,0x00,0x00] @@ -1906,50 +1906,50 @@ v_cvt_u32_u16 v1, v128.l v_cvt_u32_u16 v1, v128.h // GFX1250: v_cvt_u32_u16_e64 v1, v128.h op_sel:[1,0] ; encoding: [0x01,0x08,0xeb,0xd5,0x80,0x01,0x00,0x00] -v_exp_f16_e64 v5, v1 -// GFX1250: v_exp_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd8,0xd5,0x01,0x01,0x00,0x00] +v_exp_f16_e64 v5.l, v1.l +// GFX1250: v_exp_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd8,0xd5,0x01,0x01,0x00,0x00] -v_exp_f16_e64 v5, v255 -// GFX1250: v_exp_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd8,0xd5,0xff,0x01,0x00,0x00] +v_exp_f16_e64 v5.l, v255.l +// GFX1250: v_exp_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd8,0xd5,0xff,0x01,0x00,0x00] -v_exp_f16_e64 v5, s1 -// GFX1250: v_exp_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd8,0xd5,0x01,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, s1 +// GFX1250: v_exp_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd8,0xd5,0x01,0x00,0x00,0x00] -v_exp_f16_e64 v5, s105 -// GFX1250: v_exp_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd8,0xd5,0x69,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, s105 +// GFX1250: v_exp_f16_e64 v5.l, s105 ; encoding: 
[0x05,0x00,0xd8,0xd5,0x69,0x00,0x00,0x00] -v_exp_f16_e64 v5, vcc_lo -// GFX1250: v_exp_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd8,0xd5,0x6a,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, vcc_lo +// GFX1250: v_exp_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd8,0xd5,0x6a,0x00,0x00,0x00] -v_exp_f16_e64 v5, vcc_hi -// GFX1250: v_exp_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd8,0xd5,0x6b,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, vcc_hi +// GFX1250: v_exp_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd8,0xd5,0x6b,0x00,0x00,0x00] -v_exp_f16_e64 v5, ttmp15 -// GFX1250: v_exp_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd8,0xd5,0x7b,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, ttmp15 +// GFX1250: v_exp_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd8,0xd5,0x7b,0x00,0x00,0x00] -v_exp_f16_e64 v5, m0 -// GFX1250: v_exp_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd8,0xd5,0x7d,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, m0 +// GFX1250: v_exp_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd8,0xd5,0x7d,0x00,0x00,0x00] -v_exp_f16_e64 v5, exec_lo -// GFX1250: v_exp_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd8,0xd5,0x7e,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, exec_lo +// GFX1250: v_exp_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd8,0xd5,0x7e,0x00,0x00,0x00] -v_exp_f16_e64 v5, exec_hi -// GFX1250: v_exp_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd8,0xd5,0x7f,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, exec_hi +// GFX1250: v_exp_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd8,0xd5,0x7f,0x00,0x00,0x00] -v_exp_f16_e64 v5, null -// GFX1250: v_exp_f16_e64 v5, null ; encoding: [0x05,0x00,0xd8,0xd5,0x7c,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, null +// GFX1250: v_exp_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd8,0xd5,0x7c,0x00,0x00,0x00] -v_exp_f16_e64 v5, -1 -// GFX1250: v_exp_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd8,0xd5,0xc1,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, -1 +// GFX1250: v_exp_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd8,0xd5,0xc1,0x00,0x00,0x00] -v_exp_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_exp_f16_e64 v5, 0.5 mul:2 
; encoding: [0x05,0x00,0xd8,0xd5,0xf0,0x00,0x00,0x08] +v_exp_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_exp_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd8,0xd5,0xf0,0x00,0x00,0x08] -v_exp_f16_e64 v5, src_scc mul:4 -// GFX1250: v_exp_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd8,0xd5,0xfd,0x00,0x00,0x10] +v_exp_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_exp_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd8,0xd5,0xfd,0x00,0x00,0x10] -v_exp_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_exp_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd8,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_exp_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_exp_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd8,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_exp_f16 v1.h, v128.l // GFX1250: v_exp_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd8,0xd5,0x80,0x01,0x00,0x00] @@ -2137,50 +2137,50 @@ v_ffbl_b32_e64 v5, src_scc v_ffbl_b32_e64 v255, 0xaf123456 // GFX1250: v_ctz_i32_b32_e64 v255, 0xaf123456 ; encoding: [0xff,0x00,0xba,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf] -v_floor_f16_e64 v5, v1 -// GFX1250: v_floor_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xdb,0xd5,0x01,0x01,0x00,0x00] +v_floor_f16_e64 v5.l, v1.l +// GFX1250: v_floor_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xdb,0xd5,0x01,0x01,0x00,0x00] -v_floor_f16_e64 v5, v255 -// GFX1250: v_floor_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xdb,0xd5,0xff,0x01,0x00,0x00] +v_floor_f16_e64 v5.l, v255.l +// GFX1250: v_floor_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xdb,0xd5,0xff,0x01,0x00,0x00] -v_floor_f16_e64 v5, s1 -// GFX1250: v_floor_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xdb,0xd5,0x01,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, s1 +// GFX1250: v_floor_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xdb,0xd5,0x01,0x00,0x00,0x00] -v_floor_f16_e64 v5, s105 -// GFX1250: v_floor_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xdb,0xd5,0x69,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, s105 +// 
GFX1250: v_floor_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xdb,0xd5,0x69,0x00,0x00,0x00] -v_floor_f16_e64 v5, vcc_lo -// GFX1250: v_floor_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xdb,0xd5,0x6a,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, vcc_lo +// GFX1250: v_floor_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xdb,0xd5,0x6a,0x00,0x00,0x00] -v_floor_f16_e64 v5, vcc_hi -// GFX1250: v_floor_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xdb,0xd5,0x6b,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, vcc_hi +// GFX1250: v_floor_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xdb,0xd5,0x6b,0x00,0x00,0x00] -v_floor_f16_e64 v5, ttmp15 -// GFX1250: v_floor_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xdb,0xd5,0x7b,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, ttmp15 +// GFX1250: v_floor_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xdb,0xd5,0x7b,0x00,0x00,0x00] -v_floor_f16_e64 v5, m0 -// GFX1250: v_floor_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xdb,0xd5,0x7d,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, m0 +// GFX1250: v_floor_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xdb,0xd5,0x7d,0x00,0x00,0x00] -v_floor_f16_e64 v5, exec_lo -// GFX1250: v_floor_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xdb,0xd5,0x7e,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, exec_lo +// GFX1250: v_floor_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xdb,0xd5,0x7e,0x00,0x00,0x00] -v_floor_f16_e64 v5, exec_hi -// GFX1250: v_floor_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xdb,0xd5,0x7f,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, exec_hi +// GFX1250: v_floor_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xdb,0xd5,0x7f,0x00,0x00,0x00] -v_floor_f16_e64 v5, null -// GFX1250: v_floor_f16_e64 v5, null ; encoding: [0x05,0x00,0xdb,0xd5,0x7c,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, null +// GFX1250: v_floor_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xdb,0xd5,0x7c,0x00,0x00,0x00] -v_floor_f16_e64 v5, -1 -// GFX1250: v_floor_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xdb,0xd5,0xc1,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, -1 +// GFX1250: v_floor_f16_e64 v5.l, -1 ; encoding: 
[0x05,0x00,0xdb,0xd5,0xc1,0x00,0x00,0x00] -v_floor_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_floor_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xdb,0xd5,0xf0,0x00,0x00,0x08] +v_floor_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_floor_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xdb,0xd5,0xf0,0x00,0x00,0x08] -v_floor_f16_e64 v5, src_scc mul:4 -// GFX1250: v_floor_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xdb,0xd5,0xfd,0x00,0x00,0x10] +v_floor_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_floor_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xdb,0xd5,0xfd,0x00,0x00,0x10] -v_floor_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_floor_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdb,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_floor_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_floor_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdb,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_floor_f16 v1.h, v128.l // GFX1250: v_floor_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xdb,0xd5,0x80,0x01,0x00,0x00] @@ -2269,50 +2269,50 @@ v_floor_f64_e64 v[6:7], -|src_scc| mul:4 v_floor_f64_e64 v[254:255], 0xaf123456 clamp div:2 // GFX1250: v_floor_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0x9a,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf] -v_fract_f16_e64 v5, v1 -// GFX1250: v_fract_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xdf,0xd5,0x01,0x01,0x00,0x00] +v_fract_f16_e64 v5.l, v1.l +// GFX1250: v_fract_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xdf,0xd5,0x01,0x01,0x00,0x00] -v_fract_f16_e64 v5, v255 -// GFX1250: v_fract_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xdf,0xd5,0xff,0x01,0x00,0x00] +v_fract_f16_e64 v5.l, v255.l +// GFX1250: v_fract_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xdf,0xd5,0xff,0x01,0x00,0x00] -v_fract_f16_e64 v5, s1 -// GFX1250: v_fract_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xdf,0xd5,0x01,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, s1 +// GFX1250: v_fract_f16_e64 v5.l, s1 ; encoding: 
[0x05,0x00,0xdf,0xd5,0x01,0x00,0x00,0x00] -v_fract_f16_e64 v5, s105 -// GFX1250: v_fract_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xdf,0xd5,0x69,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, s105 +// GFX1250: v_fract_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xdf,0xd5,0x69,0x00,0x00,0x00] -v_fract_f16_e64 v5, vcc_lo -// GFX1250: v_fract_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xdf,0xd5,0x6a,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, vcc_lo +// GFX1250: v_fract_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xdf,0xd5,0x6a,0x00,0x00,0x00] -v_fract_f16_e64 v5, vcc_hi -// GFX1250: v_fract_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xdf,0xd5,0x6b,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, vcc_hi +// GFX1250: v_fract_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xdf,0xd5,0x6b,0x00,0x00,0x00] -v_fract_f16_e64 v5, ttmp15 -// GFX1250: v_fract_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xdf,0xd5,0x7b,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, ttmp15 +// GFX1250: v_fract_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xdf,0xd5,0x7b,0x00,0x00,0x00] -v_fract_f16_e64 v5, m0 -// GFX1250: v_fract_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xdf,0xd5,0x7d,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, m0 +// GFX1250: v_fract_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xdf,0xd5,0x7d,0x00,0x00,0x00] -v_fract_f16_e64 v5, exec_lo -// GFX1250: v_fract_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xdf,0xd5,0x7e,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, exec_lo +// GFX1250: v_fract_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xdf,0xd5,0x7e,0x00,0x00,0x00] -v_fract_f16_e64 v5, exec_hi -// GFX1250: v_fract_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xdf,0xd5,0x7f,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, exec_hi +// GFX1250: v_fract_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xdf,0xd5,0x7f,0x00,0x00,0x00] -v_fract_f16_e64 v5, null -// GFX1250: v_fract_f16_e64 v5, null ; encoding: [0x05,0x00,0xdf,0xd5,0x7c,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, null +// GFX1250: v_fract_f16_e64 v5.l, null ; encoding: 
[0x05,0x00,0xdf,0xd5,0x7c,0x00,0x00,0x00] -v_fract_f16_e64 v5, -1 -// GFX1250: v_fract_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xdf,0xd5,0xc1,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, -1 +// GFX1250: v_fract_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xdf,0xd5,0xc1,0x00,0x00,0x00] -v_fract_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_fract_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xdf,0xd5,0xf0,0x00,0x00,0x08] +v_fract_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_fract_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xdf,0xd5,0xf0,0x00,0x00,0x08] -v_fract_f16_e64 v5, src_scc mul:4 -// GFX1250: v_fract_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xdf,0xd5,0xfd,0x00,0x00,0x10] +v_fract_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_fract_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xdf,0xd5,0xfd,0x00,0x00,0x10] -v_fract_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_fract_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdf,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_fract_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_fract_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdf,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_fract_f16 v1.h, v128.l // GFX1250: v_fract_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xdf,0xd5,0x80,0x01,0x00,0x00] @@ -2401,50 +2401,50 @@ v_fract_f64_e64 v[6:7], -|src_scc| mul:4 v_fract_f64_e64 v[254:255], 0xaf123456 clamp div:2 // GFX1250: v_fract_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0xbe,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf] -v_frexp_exp_i16_f16_e64 v5, v1 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xda,0xd5,0x01,0x01,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, v1.l +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xda,0xd5,0x01,0x01,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, v255 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xda,0xd5,0xff,0x01,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, v255.l +// 
GFX1250: v_frexp_exp_i16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xda,0xd5,0xff,0x01,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, s1 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xda,0xd5,0x01,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, s1 +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xda,0xd5,0x01,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, s105 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xda,0xd5,0x69,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, s105 +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xda,0xd5,0x69,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, vcc_lo -// GFX1250: v_frexp_exp_i16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xda,0xd5,0x6a,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, vcc_lo +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xda,0xd5,0x6a,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, vcc_hi -// GFX1250: v_frexp_exp_i16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xda,0xd5,0x6b,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, vcc_hi +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xda,0xd5,0x6b,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, ttmp15 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xda,0xd5,0x7b,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, ttmp15 +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xda,0xd5,0x7b,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, m0 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xda,0xd5,0x7d,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, m0 +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xda,0xd5,0x7d,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, exec_lo -// GFX1250: v_frexp_exp_i16_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xda,0xd5,0x7e,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, exec_lo +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, exec_lo ; encoding: 
[0x05,0x00,0xda,0xd5,0x7e,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, exec_hi -// GFX1250: v_frexp_exp_i16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xda,0xd5,0x7f,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, exec_hi +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xda,0xd5,0x7f,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, null -// GFX1250: v_frexp_exp_i16_f16_e64 v5, null ; encoding: [0x05,0x00,0xda,0xd5,0x7c,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, null +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xda,0xd5,0x7c,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, -1 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xda,0xd5,0xc1,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, -1 +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xda,0xd5,0xc1,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, 0.5 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xda,0xd5,0xf0,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, 0.5 +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xda,0xd5,0xf0,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, src_scc -// GFX1250: v_frexp_exp_i16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xda,0xd5,0xfd,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, src_scc +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xda,0xd5,0xfd,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v255, -|0xfe0b| -// GFX1250: v_frexp_exp_i16_f16_e64 v255, -|0xfe0b| ; encoding: [0xff,0x01,0xda,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] +v_frexp_exp_i16_f16_e64 v255.l, -|0xfe0b| +// GFX1250: v_frexp_exp_i16_f16_e64 v255.l, -|0xfe0b| ; encoding: [0xff,0x01,0xda,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] v_frexp_exp_i16_f16 v1.h, v128.l // GFX1250: v_frexp_exp_i16_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xda,0xd5,0x80,0x01,0x00,0x00] @@ -2533,50 +2533,50 @@ v_frexp_exp_i32_f64_e64 v5, -|src_scc| v_frexp_exp_i32_f64_e64 v255, 
0xaf123456 // GFX1250: v_frexp_exp_i32_f64_e64 v255, 0xaf123456 ; encoding: [0xff,0x00,0xbc,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf] -v_frexp_mant_f16_e64 v5, v1 -// GFX1250: v_frexp_mant_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd9,0xd5,0x01,0x01,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, v1.l +// GFX1250: v_frexp_mant_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd9,0xd5,0x01,0x01,0x00,0x00] -v_frexp_mant_f16_e64 v5, v255 -// GFX1250: v_frexp_mant_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd9,0xd5,0xff,0x01,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, v255.l +// GFX1250: v_frexp_mant_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd9,0xd5,0xff,0x01,0x00,0x00] -v_frexp_mant_f16_e64 v5, s1 -// GFX1250: v_frexp_mant_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd9,0xd5,0x01,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, s1 +// GFX1250: v_frexp_mant_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd9,0xd5,0x01,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, s105 -// GFX1250: v_frexp_mant_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd9,0xd5,0x69,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, s105 +// GFX1250: v_frexp_mant_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd9,0xd5,0x69,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, vcc_lo -// GFX1250: v_frexp_mant_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd9,0xd5,0x6a,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, vcc_lo +// GFX1250: v_frexp_mant_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd9,0xd5,0x6a,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, vcc_hi -// GFX1250: v_frexp_mant_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd9,0xd5,0x6b,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, vcc_hi +// GFX1250: v_frexp_mant_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd9,0xd5,0x6b,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, ttmp15 -// GFX1250: v_frexp_mant_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd9,0xd5,0x7b,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, ttmp15 +// GFX1250: v_frexp_mant_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd9,0xd5,0x7b,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, 
m0 -// GFX1250: v_frexp_mant_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd9,0xd5,0x7d,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, m0 +// GFX1250: v_frexp_mant_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd9,0xd5,0x7d,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, exec_lo -// GFX1250: v_frexp_mant_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd9,0xd5,0x7e,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, exec_lo +// GFX1250: v_frexp_mant_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd9,0xd5,0x7e,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, exec_hi -// GFX1250: v_frexp_mant_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd9,0xd5,0x7f,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, exec_hi +// GFX1250: v_frexp_mant_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd9,0xd5,0x7f,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, null -// GFX1250: v_frexp_mant_f16_e64 v5, null ; encoding: [0x05,0x00,0xd9,0xd5,0x7c,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, null +// GFX1250: v_frexp_mant_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd9,0xd5,0x7c,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, -1 -// GFX1250: v_frexp_mant_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd9,0xd5,0xc1,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, -1 +// GFX1250: v_frexp_mant_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd9,0xd5,0xc1,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_frexp_mant_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd9,0xd5,0xf0,0x00,0x00,0x08] +v_frexp_mant_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_frexp_mant_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd9,0xd5,0xf0,0x00,0x00,0x08] -v_frexp_mant_f16_e64 v5, src_scc mul:4 -// GFX1250: v_frexp_mant_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd9,0xd5,0xfd,0x00,0x00,0x10] +v_frexp_mant_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_frexp_mant_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd9,0xd5,0xfd,0x00,0x00,0x10] -v_frexp_mant_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_frexp_mant_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: 
[0xff,0x81,0xd9,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_frexp_mant_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_frexp_mant_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd9,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_frexp_mant_f16 v1.h, v128.l // GFX1250: v_frexp_mant_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd9,0xd5,0x80,0x01,0x00,0x00] @@ -2665,50 +2665,50 @@ v_frexp_mant_f64_e64 v[6:7], -|src_scc| mul:4 v_frexp_mant_f64_e64 v[254:255], 0xaf123456 clamp div:2 // GFX1250: v_frexp_mant_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0xbd,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf] -v_log_f16_e64 v5, v1 -// GFX1250: v_log_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd7,0xd5,0x01,0x01,0x00,0x00] +v_log_f16_e64 v5.l, v1.l +// GFX1250: v_log_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd7,0xd5,0x01,0x01,0x00,0x00] -v_log_f16_e64 v5, v255 -// GFX1250: v_log_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd7,0xd5,0xff,0x01,0x00,0x00] +v_log_f16_e64 v5.l, v255.l +// GFX1250: v_log_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd7,0xd5,0xff,0x01,0x00,0x00] -v_log_f16_e64 v5, s1 -// GFX1250: v_log_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd7,0xd5,0x01,0x00,0x00,0x00] +v_log_f16_e64 v5.l, s1 +// GFX1250: v_log_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd7,0xd5,0x01,0x00,0x00,0x00] -v_log_f16_e64 v5, s105 -// GFX1250: v_log_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd7,0xd5,0x69,0x00,0x00,0x00] +v_log_f16_e64 v5.l, s105 +// GFX1250: v_log_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd7,0xd5,0x69,0x00,0x00,0x00] -v_log_f16_e64 v5, vcc_lo -// GFX1250: v_log_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd7,0xd5,0x6a,0x00,0x00,0x00] +v_log_f16_e64 v5.l, vcc_lo +// GFX1250: v_log_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd7,0xd5,0x6a,0x00,0x00,0x00] -v_log_f16_e64 v5, vcc_hi -// GFX1250: v_log_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd7,0xd5,0x6b,0x00,0x00,0x00] +v_log_f16_e64 v5.l, vcc_hi +// GFX1250: v_log_f16_e64 
v5.l, vcc_hi ; encoding: [0x05,0x00,0xd7,0xd5,0x6b,0x00,0x00,0x00] -v_log_f16_e64 v5, ttmp15 -// GFX1250: v_log_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd7,0xd5,0x7b,0x00,0x00,0x00] +v_log_f16_e64 v5.l, ttmp15 +// GFX1250: v_log_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd7,0xd5,0x7b,0x00,0x00,0x00] -v_log_f16_e64 v5, m0 -// GFX1250: v_log_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd7,0xd5,0x7d,0x00,0x00,0x00] +v_log_f16_e64 v5.l, m0 +// GFX1250: v_log_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd7,0xd5,0x7d,0x00,0x00,0x00] -v_log_f16_e64 v5, exec_lo -// GFX1250: v_log_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd7,0xd5,0x7e,0x00,0x00,0x00] +v_log_f16_e64 v5.l, exec_lo +// GFX1250: v_log_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd7,0xd5,0x7e,0x00,0x00,0x00] -v_log_f16_e64 v5, exec_hi -// GFX1250: v_log_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd7,0xd5,0x7f,0x00,0x00,0x00] +v_log_f16_e64 v5.l, exec_hi +// GFX1250: v_log_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd7,0xd5,0x7f,0x00,0x00,0x00] -v_log_f16_e64 v5, null -// GFX1250: v_log_f16_e64 v5, null ; encoding: [0x05,0x00,0xd7,0xd5,0x7c,0x00,0x00,0x00] +v_log_f16_e64 v5.l, null +// GFX1250: v_log_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd7,0xd5,0x7c,0x00,0x00,0x00] -v_log_f16_e64 v5, -1 -// GFX1250: v_log_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd7,0xd5,0xc1,0x00,0x00,0x00] +v_log_f16_e64 v5.l, -1 +// GFX1250: v_log_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd7,0xd5,0xc1,0x00,0x00,0x00] -v_log_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_log_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd7,0xd5,0xf0,0x00,0x00,0x08] +v_log_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_log_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd7,0xd5,0xf0,0x00,0x00,0x08] -v_log_f16_e64 v5, src_scc mul:4 -// GFX1250: v_log_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd7,0xd5,0xfd,0x00,0x00,0x10] +v_log_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_log_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd7,0xd5,0xfd,0x00,0x00,0x10] 
-v_log_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_log_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd7,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_log_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_log_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd7,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_log_f16 v1.h, v128.l // GFX1250: v_log_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd7,0xd5,0x80,0x01,0x00,0x00] @@ -2872,50 +2872,50 @@ v_movrelsd_b32_e64 v255, v255 v_nop_e64 // GFX1250: v_nop ; encoding: [0x00,0x00,0x80,0xd5,0x00,0x00,0x00,0x00] -v_not_b16_e64 v5, v1 -// GFX1250: v_not_b16_e64 v5, v1 ; encoding: [0x05,0x00,0xe9,0xd5,0x01,0x01,0x00,0x00] +v_not_b16_e64 v5.l, v1.l +// GFX1250: v_not_b16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe9,0xd5,0x01,0x01,0x00,0x00] -v_not_b16_e64 v5, v255 -// GFX1250: v_not_b16_e64 v5, v255 ; encoding: [0x05,0x00,0xe9,0xd5,0xff,0x01,0x00,0x00] +v_not_b16_e64 v5.l, v255.l +// GFX1250: v_not_b16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe9,0xd5,0xff,0x01,0x00,0x00] -v_not_b16_e64 v5, s1 -// GFX1250: v_not_b16_e64 v5, s1 ; encoding: [0x05,0x00,0xe9,0xd5,0x01,0x00,0x00,0x00] +v_not_b16_e64 v5.l, s1 +// GFX1250: v_not_b16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe9,0xd5,0x01,0x00,0x00,0x00] -v_not_b16_e64 v5, s105 -// GFX1250: v_not_b16_e64 v5, s105 ; encoding: [0x05,0x00,0xe9,0xd5,0x69,0x00,0x00,0x00] +v_not_b16_e64 v5.l, s105 +// GFX1250: v_not_b16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe9,0xd5,0x69,0x00,0x00,0x00] -v_not_b16_e64 v5, vcc_lo -// GFX1250: v_not_b16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe9,0xd5,0x6a,0x00,0x00,0x00] +v_not_b16_e64 v5.l, vcc_lo +// GFX1250: v_not_b16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe9,0xd5,0x6a,0x00,0x00,0x00] -v_not_b16_e64 v5, vcc_hi -// GFX1250: v_not_b16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe9,0xd5,0x6b,0x00,0x00,0x00] +v_not_b16_e64 v5.l, vcc_hi +// GFX1250: v_not_b16_e64 v5.l, vcc_hi ; encoding: 
[0x05,0x00,0xe9,0xd5,0x6b,0x00,0x00,0x00] -v_not_b16_e64 v5, ttmp15 -// GFX1250: v_not_b16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe9,0xd5,0x7b,0x00,0x00,0x00] +v_not_b16_e64 v5.l, ttmp15 +// GFX1250: v_not_b16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe9,0xd5,0x7b,0x00,0x00,0x00] -v_not_b16_e64 v5, m0 -// GFX1250: v_not_b16_e64 v5, m0 ; encoding: [0x05,0x00,0xe9,0xd5,0x7d,0x00,0x00,0x00] +v_not_b16_e64 v5.l, m0 +// GFX1250: v_not_b16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe9,0xd5,0x7d,0x00,0x00,0x00] -v_not_b16_e64 v5, exec_lo -// GFX1250: v_not_b16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe9,0xd5,0x7e,0x00,0x00,0x00] +v_not_b16_e64 v5.l, exec_lo +// GFX1250: v_not_b16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe9,0xd5,0x7e,0x00,0x00,0x00] -v_not_b16_e64 v5, exec_hi -// GFX1250: v_not_b16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe9,0xd5,0x7f,0x00,0x00,0x00] +v_not_b16_e64 v5.l, exec_hi +// GFX1250: v_not_b16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe9,0xd5,0x7f,0x00,0x00,0x00] -v_not_b16_e64 v5, null -// GFX1250: v_not_b16_e64 v5, null ; encoding: [0x05,0x00,0xe9,0xd5,0x7c,0x00,0x00,0x00] +v_not_b16_e64 v5.l, null +// GFX1250: v_not_b16_e64 v5.l, null ; encoding: [0x05,0x00,0xe9,0xd5,0x7c,0x00,0x00,0x00] -v_not_b16_e64 v5, -1 -// GFX1250: v_not_b16_e64 v5, -1 ; encoding: [0x05,0x00,0xe9,0xd5,0xc1,0x00,0x00,0x00] +v_not_b16_e64 v5.l, -1 +// GFX1250: v_not_b16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe9,0xd5,0xc1,0x00,0x00,0x00] -v_not_b16_e64 v5, 0.5 -// GFX1250: v_not_b16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xe9,0xd5,0xf0,0x00,0x00,0x00] +v_not_b16_e64 v5.l, 0.5 +// GFX1250: v_not_b16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xe9,0xd5,0xf0,0x00,0x00,0x00] -v_not_b16_e64 v5, src_scc -// GFX1250: v_not_b16_e64 v5, src_scc ; encoding: [0x05,0x00,0xe9,0xd5,0xfd,0x00,0x00,0x00] +v_not_b16_e64 v5.l, src_scc +// GFX1250: v_not_b16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xe9,0xd5,0xfd,0x00,0x00,0x00] -v_not_b16_e64 v255, 0xfe0b -// GFX1250: v_not_b16_e64 v255, 0xfe0b ; 
encoding: [0xff,0x00,0xe9,0xd5,0xff,0x00,0x00,0x00,0x0b,0xfe,0x00,0x00] +v_not_b16_e64 v255.l, 0xfe0b +// GFX1250: v_not_b16_e64 v255.l, 0xfe0b ; encoding: [0xff,0x00,0xe9,0xd5,0xff,0x00,0x00,0x00,0x0b,0xfe,0x00,0x00] v_not_b16 v1.h, v128.l // GFX1250: v_not_b16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xe9,0xd5,0x80,0x01,0x00,0x00] @@ -2971,50 +2971,50 @@ v_not_b32_e64 v255, 0xaf123456 v_pipeflush_e64 // GFX1250: v_pipeflush ; encoding: [0x00,0x00,0x9b,0xd5,0x00,0x00,0x00,0x00] -v_rcp_f16_e64 v5, v1 -// GFX1250: v_rcp_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd4,0xd5,0x01,0x01,0x00,0x00] +v_rcp_f16_e64 v5.l, v1.l +// GFX1250: v_rcp_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd4,0xd5,0x01,0x01,0x00,0x00] -v_rcp_f16_e64 v5, v255 -// GFX1250: v_rcp_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd4,0xd5,0xff,0x01,0x00,0x00] +v_rcp_f16_e64 v5.l, v255.l +// GFX1250: v_rcp_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd4,0xd5,0xff,0x01,0x00,0x00] -v_rcp_f16_e64 v5, s1 -// GFX1250: v_rcp_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd4,0xd5,0x01,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, s1 +// GFX1250: v_rcp_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd4,0xd5,0x01,0x00,0x00,0x00] -v_rcp_f16_e64 v5, s105 -// GFX1250: v_rcp_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd4,0xd5,0x69,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, s105 +// GFX1250: v_rcp_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd4,0xd5,0x69,0x00,0x00,0x00] -v_rcp_f16_e64 v5, vcc_lo -// GFX1250: v_rcp_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd4,0xd5,0x6a,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, vcc_lo +// GFX1250: v_rcp_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd4,0xd5,0x6a,0x00,0x00,0x00] -v_rcp_f16_e64 v5, vcc_hi -// GFX1250: v_rcp_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd4,0xd5,0x6b,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, vcc_hi +// GFX1250: v_rcp_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd4,0xd5,0x6b,0x00,0x00,0x00] -v_rcp_f16_e64 v5, ttmp15 -// GFX1250: v_rcp_f16_e64 v5, ttmp15 ; encoding: 
[0x05,0x00,0xd4,0xd5,0x7b,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, ttmp15 +// GFX1250: v_rcp_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd4,0xd5,0x7b,0x00,0x00,0x00] -v_rcp_f16_e64 v5, m0 -// GFX1250: v_rcp_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd4,0xd5,0x7d,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, m0 +// GFX1250: v_rcp_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd4,0xd5,0x7d,0x00,0x00,0x00] -v_rcp_f16_e64 v5, exec_lo -// GFX1250: v_rcp_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd4,0xd5,0x7e,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, exec_lo +// GFX1250: v_rcp_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd4,0xd5,0x7e,0x00,0x00,0x00] -v_rcp_f16_e64 v5, exec_hi -// GFX1250: v_rcp_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd4,0xd5,0x7f,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, exec_hi +// GFX1250: v_rcp_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd4,0xd5,0x7f,0x00,0x00,0x00] -v_rcp_f16_e64 v5, null -// GFX1250: v_rcp_f16_e64 v5, null ; encoding: [0x05,0x00,0xd4,0xd5,0x7c,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, null +// GFX1250: v_rcp_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd4,0xd5,0x7c,0x00,0x00,0x00] -v_rcp_f16_e64 v5, -1 -// GFX1250: v_rcp_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd4,0xd5,0xc1,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, -1 +// GFX1250: v_rcp_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd4,0xd5,0xc1,0x00,0x00,0x00] -v_rcp_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_rcp_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd4,0xd5,0xf0,0x00,0x00,0x08] +v_rcp_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_rcp_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd4,0xd5,0xf0,0x00,0x00,0x08] -v_rcp_f16_e64 v5, src_scc mul:4 -// GFX1250: v_rcp_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd4,0xd5,0xfd,0x00,0x00,0x10] +v_rcp_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_rcp_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd4,0xd5,0xfd,0x00,0x00,0x10] -v_rcp_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_rcp_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: 
[0xff,0x81,0xd4,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_rcp_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_rcp_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd4,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_rcp_f16 v1.h, v128.l // GFX1250: v_rcp_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd4,0xd5,0x80,0x01,0x00,0x00] @@ -3148,50 +3148,50 @@ v_rcp_iflag_f32_e64 v5, src_scc mul:4 v_rcp_iflag_f32_e64 v255, -|0xaf123456| clamp div:2 // GFX1250: v_rcp_iflag_f32_e64 v255, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0xab,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf] -v_rndne_f16_e64 v5, v1 -// GFX1250: v_rndne_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xde,0xd5,0x01,0x01,0x00,0x00] +v_rndne_f16_e64 v5.l, v1.l +// GFX1250: v_rndne_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xde,0xd5,0x01,0x01,0x00,0x00] -v_rndne_f16_e64 v5, v255 -// GFX1250: v_rndne_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xde,0xd5,0xff,0x01,0x00,0x00] +v_rndne_f16_e64 v5.l, v255.l +// GFX1250: v_rndne_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xde,0xd5,0xff,0x01,0x00,0x00] -v_rndne_f16_e64 v5, s1 -// GFX1250: v_rndne_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xde,0xd5,0x01,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, s1 +// GFX1250: v_rndne_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xde,0xd5,0x01,0x00,0x00,0x00] -v_rndne_f16_e64 v5, s105 -// GFX1250: v_rndne_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xde,0xd5,0x69,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, s105 +// GFX1250: v_rndne_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xde,0xd5,0x69,0x00,0x00,0x00] -v_rndne_f16_e64 v5, vcc_lo -// GFX1250: v_rndne_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xde,0xd5,0x6a,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, vcc_lo +// GFX1250: v_rndne_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xde,0xd5,0x6a,0x00,0x00,0x00] -v_rndne_f16_e64 v5, vcc_hi -// GFX1250: v_rndne_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xde,0xd5,0x6b,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, vcc_hi +// GFX1250: v_rndne_f16_e64 
v5.l, vcc_hi ; encoding: [0x05,0x00,0xde,0xd5,0x6b,0x00,0x00,0x00] -v_rndne_f16_e64 v5, ttmp15 -// GFX1250: v_rndne_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xde,0xd5,0x7b,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, ttmp15 +// GFX1250: v_rndne_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xde,0xd5,0x7b,0x00,0x00,0x00] -v_rndne_f16_e64 v5, m0 -// GFX1250: v_rndne_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xde,0xd5,0x7d,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, m0 +// GFX1250: v_rndne_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xde,0xd5,0x7d,0x00,0x00,0x00] -v_rndne_f16_e64 v5, exec_lo -// GFX1250: v_rndne_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xde,0xd5,0x7e,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, exec_lo +// GFX1250: v_rndne_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xde,0xd5,0x7e,0x00,0x00,0x00] -v_rndne_f16_e64 v5, exec_hi -// GFX1250: v_rndne_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xde,0xd5,0x7f,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, exec_hi +// GFX1250: v_rndne_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xde,0xd5,0x7f,0x00,0x00,0x00] -v_rndne_f16_e64 v5, null -// GFX1250: v_rndne_f16_e64 v5, null ; encoding: [0x05,0x00,0xde,0xd5,0x7c,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, null +// GFX1250: v_rndne_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xde,0xd5,0x7c,0x00,0x00,0x00] -v_rndne_f16_e64 v5, -1 -// GFX1250: v_rndne_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xde,0xd5,0xc1,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, -1 +// GFX1250: v_rndne_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xde,0xd5,0xc1,0x00,0x00,0x00] -v_rndne_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_rndne_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xde,0xd5,0xf0,0x00,0x00,0x08] +v_rndne_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_rndne_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xde,0xd5,0xf0,0x00,0x00,0x08] -v_rndne_f16_e64 v5, src_scc mul:4 -// GFX1250: v_rndne_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xde,0xd5,0xfd,0x00,0x00,0x10] +v_rndne_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_rndne_f16_e64 v5.l, src_scc 
mul:4 ; encoding: [0x05,0x00,0xde,0xd5,0xfd,0x00,0x00,0x10] -v_rndne_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_rndne_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xde,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_rndne_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_rndne_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xde,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_rndne_f16 v1.h, v128.l // GFX1250: v_rndne_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xde,0xd5,0x80,0x01,0x00,0x00] @@ -3280,50 +3280,50 @@ v_rndne_f64_e64 v[6:7], -|src_scc| mul:4 v_rndne_f64_e64 v[254:255], 0xaf123456 clamp div:2 // GFX1250: v_rndne_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0x99,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf] -v_rsq_f16_e64 v5, v1 -// GFX1250: v_rsq_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd6,0xd5,0x01,0x01,0x00,0x00] +v_rsq_f16_e64 v5.l, v1.l +// GFX1250: v_rsq_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd6,0xd5,0x01,0x01,0x00,0x00] -v_rsq_f16_e64 v5, v255 -// GFX1250: v_rsq_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd6,0xd5,0xff,0x01,0x00,0x00] +v_rsq_f16_e64 v5.l, v255.l +// GFX1250: v_rsq_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd6,0xd5,0xff,0x01,0x00,0x00] -v_rsq_f16_e64 v5, s1 -// GFX1250: v_rsq_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd6,0xd5,0x01,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, s1 +// GFX1250: v_rsq_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd6,0xd5,0x01,0x00,0x00,0x00] -v_rsq_f16_e64 v5, s105 -// GFX1250: v_rsq_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd6,0xd5,0x69,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, s105 +// GFX1250: v_rsq_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd6,0xd5,0x69,0x00,0x00,0x00] -v_rsq_f16_e64 v5, vcc_lo -// GFX1250: v_rsq_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd6,0xd5,0x6a,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, vcc_lo +// GFX1250: v_rsq_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd6,0xd5,0x6a,0x00,0x00,0x00] -v_rsq_f16_e64 v5, vcc_hi -// 
GFX1250: v_rsq_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd6,0xd5,0x6b,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, vcc_hi +// GFX1250: v_rsq_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd6,0xd5,0x6b,0x00,0x00,0x00] -v_rsq_f16_e64 v5, ttmp15 -// GFX1250: v_rsq_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd6,0xd5,0x7b,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, ttmp15 +// GFX1250: v_rsq_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd6,0xd5,0x7b,0x00,0x00,0x00] -v_rsq_f16_e64 v5, m0 -// GFX1250: v_rsq_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd6,0xd5,0x7d,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, m0 +// GFX1250: v_rsq_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd6,0xd5,0x7d,0x00,0x00,0x00] -v_rsq_f16_e64 v5, exec_lo -// GFX1250: v_rsq_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd6,0xd5,0x7e,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, exec_lo +// GFX1250: v_rsq_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd6,0xd5,0x7e,0x00,0x00,0x00] -v_rsq_f16_e64 v5, exec_hi -// GFX1250: v_rsq_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd6,0xd5,0x7f,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, exec_hi +// GFX1250: v_rsq_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd6,0xd5,0x7f,0x00,0x00,0x00] -v_rsq_f16_e64 v5, null -// GFX1250: v_rsq_f16_e64 v5, null ; encoding: [0x05,0x00,0xd6,0xd5,0x7c,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, null +// GFX1250: v_rsq_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd6,0xd5,0x7c,0x00,0x00,0x00] -v_rsq_f16_e64 v5, -1 -// GFX1250: v_rsq_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd6,0xd5,0xc1,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, -1 +// GFX1250: v_rsq_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd6,0xd5,0xc1,0x00,0x00,0x00] -v_rsq_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_rsq_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd6,0xd5,0xf0,0x00,0x00,0x08] +v_rsq_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_rsq_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd6,0xd5,0xf0,0x00,0x00,0x08] -v_rsq_f16_e64 v5, src_scc mul:4 -// GFX1250: v_rsq_f16_e64 v5, src_scc mul:4 ; encoding: 
[0x05,0x00,0xd6,0xd5,0xfd,0x00,0x00,0x10] +v_rsq_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_rsq_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd6,0xd5,0xfd,0x00,0x00,0x10] -v_rsq_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_rsq_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd6,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_rsq_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_rsq_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd6,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_rsq_f16 v1.h, v128.l // GFX1250: v_rsq_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd6,0xd5,0x80,0x01,0x00,0x00] @@ -3412,50 +3412,50 @@ v_rsq_f64_e64 v[6:7], -|src_scc| mul:4 v_rsq_f64_e64 v[254:255], 0xaf123456 clamp div:2 // GFX1250: v_rsq_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0xb1,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf] -v_sat_pk_u8_i16_e64 v5, v1 -// GFX1250: v_sat_pk_u8_i16_e64 v5, v1 ; encoding: [0x05,0x00,0xe2,0xd5,0x01,0x01,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, v1 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, v1 ; encoding: [0x05,0x00,0xe2,0xd5,0x01,0x01,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, v255 -// GFX1250: v_sat_pk_u8_i16_e64 v5, v255 ; encoding: [0x05,0x00,0xe2,0xd5,0xff,0x01,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, v255 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, v255 ; encoding: [0x05,0x00,0xe2,0xd5,0xff,0x01,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, s1 -// GFX1250: v_sat_pk_u8_i16_e64 v5, s1 ; encoding: [0x05,0x00,0xe2,0xd5,0x01,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, s1 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe2,0xd5,0x01,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, s105 -// GFX1250: v_sat_pk_u8_i16_e64 v5, s105 ; encoding: [0x05,0x00,0xe2,0xd5,0x69,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, s105 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe2,0xd5,0x69,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, vcc_lo -// GFX1250: v_sat_pk_u8_i16_e64 v5, vcc_lo ; encoding: 
[0x05,0x00,0xe2,0xd5,0x6a,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, vcc_lo +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe2,0xd5,0x6a,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, vcc_hi -// GFX1250: v_sat_pk_u8_i16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe2,0xd5,0x6b,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, vcc_hi +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe2,0xd5,0x6b,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, ttmp15 -// GFX1250: v_sat_pk_u8_i16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe2,0xd5,0x7b,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, ttmp15 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe2,0xd5,0x7b,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, m0 -// GFX1250: v_sat_pk_u8_i16_e64 v5, m0 ; encoding: [0x05,0x00,0xe2,0xd5,0x7d,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, m0 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe2,0xd5,0x7d,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, exec_lo -// GFX1250: v_sat_pk_u8_i16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe2,0xd5,0x7e,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, exec_lo +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe2,0xd5,0x7e,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, exec_hi -// GFX1250: v_sat_pk_u8_i16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe2,0xd5,0x7f,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, exec_hi +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe2,0xd5,0x7f,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, null -// GFX1250: v_sat_pk_u8_i16_e64 v5, null ; encoding: [0x05,0x00,0xe2,0xd5,0x7c,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, null +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, null ; encoding: [0x05,0x00,0xe2,0xd5,0x7c,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, -1 -// GFX1250: v_sat_pk_u8_i16_e64 v5, -1 ; encoding: [0x05,0x00,0xe2,0xd5,0xc1,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, -1 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe2,0xd5,0xc1,0x00,0x00,0x00] 
-v_sat_pk_u8_i16_e64 v5, 0.5 -// GFX1250: v_sat_pk_u8_i16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xe2,0xd5,0xf0,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, 0.5 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xe2,0xd5,0xf0,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, src_scc -// GFX1250: v_sat_pk_u8_i16_e64 v5, src_scc ; encoding: [0x05,0x00,0xe2,0xd5,0xfd,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, src_scc +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xe2,0xd5,0xfd,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v255, 0xfe0b -// GFX1250: v_sat_pk_u8_i16_e64 v255, 0xfe0b ; encoding: [0xff,0x00,0xe2,0xd5,0xff,0x00,0x00,0x00,0x0b,0xfe,0x00,0x00] +v_sat_pk_u8_i16_e64 v255.l, 0xfe0b +// GFX1250: v_sat_pk_u8_i16_e64 v255.l, 0xfe0b ; encoding: [0xff,0x00,0xe2,0xd5,0xff,0x00,0x00,0x00,0x0b,0xfe,0x00,0x00] v_sat_pk_u8_i16 v128.l, v1 // GFX1250: v_sat_pk_u8_i16_e64 v128.l, v1 ; encoding: [0x80,0x00,0xe2,0xd5,0x01,0x01,0x00,0x00] @@ -3463,50 +3463,50 @@ v_sat_pk_u8_i16 v128.l, v1 v_sat_pk_u8_i16 v128.h, v1 // GFX1250: v_sat_pk_u8_i16_e64 v128.h, v1 op_sel:[0,1] ; encoding: [0x80,0x40,0xe2,0xd5,0x01,0x01,0x00,0x00] -v_sin_f16_e64 v5, v1 -// GFX1250: v_sin_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xe0,0xd5,0x01,0x01,0x00,0x00] +v_sin_f16_e64 v5.l, v1.l +// GFX1250: v_sin_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe0,0xd5,0x01,0x01,0x00,0x00] -v_sin_f16_e64 v5, v255 -// GFX1250: v_sin_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xe0,0xd5,0xff,0x01,0x00,0x00] +v_sin_f16_e64 v5.l, v255.l +// GFX1250: v_sin_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe0,0xd5,0xff,0x01,0x00,0x00] -v_sin_f16_e64 v5, s1 -// GFX1250: v_sin_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xe0,0xd5,0x01,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, s1 +// GFX1250: v_sin_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe0,0xd5,0x01,0x00,0x00,0x00] -v_sin_f16_e64 v5, s105 -// GFX1250: v_sin_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xe0,0xd5,0x69,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, s105 +// GFX1250: v_sin_f16_e64 
v5.l, s105 ; encoding: [0x05,0x00,0xe0,0xd5,0x69,0x00,0x00,0x00] -v_sin_f16_e64 v5, vcc_lo -// GFX1250: v_sin_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe0,0xd5,0x6a,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, vcc_lo +// GFX1250: v_sin_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe0,0xd5,0x6a,0x00,0x00,0x00] -v_sin_f16_e64 v5, vcc_hi -// GFX1250: v_sin_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe0,0xd5,0x6b,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, vcc_hi +// GFX1250: v_sin_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe0,0xd5,0x6b,0x00,0x00,0x00] -v_sin_f16_e64 v5, ttmp15 -// GFX1250: v_sin_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe0,0xd5,0x7b,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, ttmp15 +// GFX1250: v_sin_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe0,0xd5,0x7b,0x00,0x00,0x00] -v_sin_f16_e64 v5, m0 -// GFX1250: v_sin_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xe0,0xd5,0x7d,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, m0 +// GFX1250: v_sin_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe0,0xd5,0x7d,0x00,0x00,0x00] -v_sin_f16_e64 v5, exec_lo -// GFX1250: v_sin_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe0,0xd5,0x7e,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, exec_lo +// GFX1250: v_sin_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe0,0xd5,0x7e,0x00,0x00,0x00] -v_sin_f16_e64 v5, exec_hi -// GFX1250: v_sin_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe0,0xd5,0x7f,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, exec_hi +// GFX1250: v_sin_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe0,0xd5,0x7f,0x00,0x00,0x00] -v_sin_f16_e64 v5, null -// GFX1250: v_sin_f16_e64 v5, null ; encoding: [0x05,0x00,0xe0,0xd5,0x7c,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, null +// GFX1250: v_sin_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xe0,0xd5,0x7c,0x00,0x00,0x00] -v_sin_f16_e64 v5, -1 -// GFX1250: v_sin_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xe0,0xd5,0xc1,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, -1 +// GFX1250: v_sin_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe0,0xd5,0xc1,0x00,0x00,0x00] -v_sin_f16_e64 v5, 0.5 mul:2 -// GFX1250: 
v_sin_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xe0,0xd5,0xf0,0x00,0x00,0x08] +v_sin_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_sin_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xe0,0xd5,0xf0,0x00,0x00,0x08] -v_sin_f16_e64 v5, src_scc mul:4 -// GFX1250: v_sin_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xe0,0xd5,0xfd,0x00,0x00,0x10] +v_sin_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_sin_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xe0,0xd5,0xfd,0x00,0x00,0x10] -v_sin_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_sin_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xe0,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_sin_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_sin_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xe0,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_sin_f16 v1.h, v128.l // GFX1250: v_sin_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xe0,0xd5,0x80,0x01,0x00,0x00] @@ -3559,50 +3559,50 @@ v_sin_f32_e64 v5, src_scc mul:4 v_sin_f32_e64 v255, -|0xaf123456| clamp div:2 // GFX1250: v_sin_f32_e64 v255, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0xb5,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf] -v_sqrt_f16_e64 v5, v1 -// GFX1250: v_sqrt_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd5,0xd5,0x01,0x01,0x00,0x00] +v_sqrt_f16_e64 v5.l, v1.l +// GFX1250: v_sqrt_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd5,0xd5,0x01,0x01,0x00,0x00] -v_sqrt_f16_e64 v5, v255 -// GFX1250: v_sqrt_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd5,0xd5,0xff,0x01,0x00,0x00] +v_sqrt_f16_e64 v5.l, v255.l +// GFX1250: v_sqrt_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd5,0xd5,0xff,0x01,0x00,0x00] -v_sqrt_f16_e64 v5, s1 -// GFX1250: v_sqrt_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd5,0xd5,0x01,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, s1 +// GFX1250: v_sqrt_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd5,0xd5,0x01,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, s105 -// GFX1250: v_sqrt_f16_e64 v5, s105 ; encoding: 
[0x05,0x00,0xd5,0xd5,0x69,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, s105 +// GFX1250: v_sqrt_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd5,0xd5,0x69,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, vcc_lo -// GFX1250: v_sqrt_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd5,0xd5,0x6a,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, vcc_lo +// GFX1250: v_sqrt_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd5,0xd5,0x6a,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, vcc_hi -// GFX1250: v_sqrt_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd5,0xd5,0x6b,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, vcc_hi +// GFX1250: v_sqrt_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd5,0xd5,0x6b,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, ttmp15 -// GFX1250: v_sqrt_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd5,0xd5,0x7b,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, ttmp15 +// GFX1250: v_sqrt_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd5,0xd5,0x7b,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, m0 -// GFX1250: v_sqrt_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd5,0xd5,0x7d,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, m0 +// GFX1250: v_sqrt_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd5,0xd5,0x7d,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, exec_lo -// GFX1250: v_sqrt_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd5,0xd5,0x7e,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, exec_lo +// GFX1250: v_sqrt_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd5,0xd5,0x7e,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, exec_hi -// GFX1250: v_sqrt_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd5,0xd5,0x7f,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, exec_hi +// GFX1250: v_sqrt_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd5,0xd5,0x7f,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, null -// GFX1250: v_sqrt_f16_e64 v5, null ; encoding: [0x05,0x00,0xd5,0xd5,0x7c,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, null +// GFX1250: v_sqrt_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd5,0xd5,0x7c,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, -1 -// GFX1250: v_sqrt_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd5,0xd5,0xc1,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, -1 +// 
GFX1250: v_sqrt_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd5,0xd5,0xc1,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_sqrt_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd5,0xd5,0xf0,0x00,0x00,0x08] +v_sqrt_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_sqrt_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd5,0xd5,0xf0,0x00,0x00,0x08] -v_sqrt_f16_e64 v5, src_scc mul:4 -// GFX1250: v_sqrt_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd5,0xd5,0xfd,0x00,0x00,0x10] +v_sqrt_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_sqrt_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd5,0xd5,0xfd,0x00,0x00,0x10] -v_sqrt_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_sqrt_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd5,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_sqrt_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_sqrt_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd5,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_sqrt_f16 v1.h, v128.l // GFX1250: v_sqrt_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd5,0xd5,0x80,0x01,0x00,0x00] @@ -3691,50 +3691,50 @@ v_sqrt_f64_e64 v[6:7], -|src_scc| mul:4 v_sqrt_f64_e64 v[254:255], 0xaf123456 clamp div:2 // GFX1250: v_sqrt_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0xb4,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf] -v_trunc_f16_e64 v5, v1 -// GFX1250: v_trunc_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xdd,0xd5,0x01,0x01,0x00,0x00] +v_trunc_f16_e64 v5.l, v1.l +// GFX1250: v_trunc_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xdd,0xd5,0x01,0x01,0x00,0x00] -v_trunc_f16_e64 v5, v255 -// GFX1250: v_trunc_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xdd,0xd5,0xff,0x01,0x00,0x00] +v_trunc_f16_e64 v5.l, v255.l +// GFX1250: v_trunc_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xdd,0xd5,0xff,0x01,0x00,0x00] -v_trunc_f16_e64 v5, s1 -// GFX1250: v_trunc_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xdd,0xd5,0x01,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, s1 +// GFX1250: v_trunc_f16_e64 
v5.l, s1 ; encoding: [0x05,0x00,0xdd,0xd5,0x01,0x00,0x00,0x00] -v_trunc_f16_e64 v5, s105 -// GFX1250: v_trunc_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xdd,0xd5,0x69,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, s105 +// GFX1250: v_trunc_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xdd,0xd5,0x69,0x00,0x00,0x00] -v_trunc_f16_e64 v5, vcc_lo -// GFX1250: v_trunc_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xdd,0xd5,0x6a,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, vcc_lo +// GFX1250: v_trunc_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xdd,0xd5,0x6a,0x00,0x00,0x00] -v_trunc_f16_e64 v5, vcc_hi -// GFX1250: v_trunc_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xdd,0xd5,0x6b,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, vcc_hi +// GFX1250: v_trunc_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xdd,0xd5,0x6b,0x00,0x00,0x00] -v_trunc_f16_e64 v5, ttmp15 -// GFX1250: v_trunc_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xdd,0xd5,0x7b,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, ttmp15 +// GFX1250: v_trunc_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xdd,0xd5,0x7b,0x00,0x00,0x00] -v_trunc_f16_e64 v5, m0 -// GFX1250: v_trunc_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xdd,0xd5,0x7d,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, m0 +// GFX1250: v_trunc_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xdd,0xd5,0x7d,0x00,0x00,0x00] -v_trunc_f16_e64 v5, exec_lo -// GFX1250: v_trunc_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xdd,0xd5,0x7e,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, exec_lo +// GFX1250: v_trunc_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xdd,0xd5,0x7e,0x00,0x00,0x00] -v_trunc_f16_e64 v5, exec_hi -// GFX1250: v_trunc_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xdd,0xd5,0x7f,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, exec_hi +// GFX1250: v_trunc_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xdd,0xd5,0x7f,0x00,0x00,0x00] -v_trunc_f16_e64 v5, null -// GFX1250: v_trunc_f16_e64 v5, null ; encoding: [0x05,0x00,0xdd,0xd5,0x7c,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, null +// GFX1250: v_trunc_f16_e64 v5.l, null ; encoding: 
[0x05,0x00,0xdd,0xd5,0x7c,0x00,0x00,0x00] -v_trunc_f16_e64 v5, -1 -// GFX1250: v_trunc_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xdd,0xd5,0xc1,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, -1 +// GFX1250: v_trunc_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xdd,0xd5,0xc1,0x00,0x00,0x00] -v_trunc_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_trunc_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xdd,0xd5,0xf0,0x00,0x00,0x08] +v_trunc_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_trunc_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xdd,0xd5,0xf0,0x00,0x00,0x08] -v_trunc_f16_e64 v5, src_scc mul:4 -// GFX1250: v_trunc_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xdd,0xd5,0xfd,0x00,0x00,0x10] +v_trunc_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_trunc_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xdd,0xd5,0xfd,0x00,0x00,0x10] -v_trunc_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_trunc_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdd,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_trunc_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_trunc_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdd,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_trunc_f16 v1.h, v128.l // GFX1250: v_trunc_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xdd,0xd5,0x80,0x01,0x00,0x00] @@ -3868,98 +3868,98 @@ v_tanh_f32_e64 v5, src_scc mul:4 v_tanh_f32_e64 v255, -|0xaf123456| clamp div:2 // GFX1250: v_tanh_f32_e64 v255, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0x9e,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf] -v_tanh_f16_e64 v5, v1 -// GFX1250: v_tanh_f16_e64 v5, v1 ; encoding: [0x05,0x00,0x9f,0xd5,0x01,0x01,0x00,0x00] +v_tanh_f16_e64 v5.l, v1.l +// GFX1250: v_tanh_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0x9f,0xd5,0x01,0x01,0x00,0x00] -v_tanh_f16_e64 v5, v255 -// GFX1250: v_tanh_f16_e64 v5, v255 ; encoding: [0x05,0x00,0x9f,0xd5,0xff,0x01,0x00,0x00] +v_tanh_f16_e64 v5.l, v255.l +// GFX1250: v_tanh_f16_e64 v5.l, v255.l ; encoding: 
[0x05,0x00,0x9f,0xd5,0xff,0x01,0x00,0x00] -v_tanh_f16_e64 v5, s1 -// GFX1250: v_tanh_f16_e64 v5, s1 ; encoding: [0x05,0x00,0x9f,0xd5,0x01,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, s1 +// GFX1250: v_tanh_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0x9f,0xd5,0x01,0x00,0x00,0x00] -v_tanh_f16_e64 v5, s105 -// GFX1250: v_tanh_f16_e64 v5, s105 ; encoding: [0x05,0x00,0x9f,0xd5,0x69,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, s105 +// GFX1250: v_tanh_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0x9f,0xd5,0x69,0x00,0x00,0x00] -v_tanh_f16_e64 v5, vcc_lo -// GFX1250: v_tanh_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0x9f,0xd5,0x6a,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, vcc_lo +// GFX1250: v_tanh_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0x9f,0xd5,0x6a,0x00,0x00,0x00] -v_tanh_f16_e64 v5, vcc_hi -// GFX1250: v_tanh_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0x9f,0xd5,0x6b,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, vcc_hi +// GFX1250: v_tanh_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0x9f,0xd5,0x6b,0x00,0x00,0x00] -v_tanh_f16_e64 v5, ttmp15 -// GFX1250: v_tanh_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0x9f,0xd5,0x7b,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, ttmp15 +// GFX1250: v_tanh_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0x9f,0xd5,0x7b,0x00,0x00,0x00] -v_tanh_f16_e64 v5, m0 -// GFX1250: v_tanh_f16_e64 v5, m0 ; encoding: [0x05,0x00,0x9f,0xd5,0x7d,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, m0 +// GFX1250: v_tanh_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0x9f,0xd5,0x7d,0x00,0x00,0x00] -v_tanh_f16_e64 v5, exec_lo -// GFX1250: v_tanh_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0x9f,0xd5,0x7e,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, exec_lo +// GFX1250: v_tanh_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0x9f,0xd5,0x7e,0x00,0x00,0x00] -v_tanh_f16_e64 v5, exec_hi -// GFX1250: v_tanh_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0x9f,0xd5,0x7f,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, exec_hi +// GFX1250: v_tanh_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0x9f,0xd5,0x7f,0x00,0x00,0x00] -v_tanh_f16_e64 v5, null -// GFX1250: 
v_tanh_f16_e64 v5, null ; encoding: [0x05,0x00,0x9f,0xd5,0x7c,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, null +// GFX1250: v_tanh_f16_e64 v5.l, null ; encoding: [0x05,0x00,0x9f,0xd5,0x7c,0x00,0x00,0x00] -v_tanh_f16_e64 v5, -1 -// GFX1250: v_tanh_f16_e64 v5, -1 ; encoding: [0x05,0x00,0x9f,0xd5,0xc1,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, -1 +// GFX1250: v_tanh_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0x9f,0xd5,0xc1,0x00,0x00,0x00] -v_tanh_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_tanh_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0x9f,0xd5,0xf0,0x00,0x00,0x08] +v_tanh_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_tanh_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0x9f,0xd5,0xf0,0x00,0x00,0x08] -v_tanh_f16_e64 v5, src_scc mul:4 -// GFX1250: v_tanh_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0x9f,0xd5,0xfd,0x00,0x00,0x10] +v_tanh_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_tanh_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0x9f,0xd5,0xfd,0x00,0x00,0x10] -v_tanh_f16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_tanh_f16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0x9f,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_tanh_f16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_tanh_f16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0x9f,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_tanh_f16 v5.l, v128.h // GFX1250: v_tanh_f16_e64 v5.l, v128.h op_sel:[1,0] ; encoding: [0x05,0x08,0x9f,0xd5,0x80,0x01,0x00,0x00] -v_tanh_bf16_e64 v5, v1 -// GFX1250: v_tanh_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xca,0xd5,0x01,0x01,0x00,0x00] +v_tanh_bf16_e64 v5.l, v1.l +// GFX1250: v_tanh_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xca,0xd5,0x01,0x01,0x00,0x00] -v_tanh_bf16_e64 v5, v255 -// GFX1250: v_tanh_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xca,0xd5,0xff,0x01,0x00,0x00] +v_tanh_bf16_e64 v5.l, v255.l +// GFX1250: v_tanh_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xca,0xd5,0xff,0x01,0x00,0x00] -v_tanh_bf16_e64 v5, s1 -// GFX1250: v_tanh_bf16_e64 v5, s1 ; encoding: 
[0x05,0x00,0xca,0xd5,0x01,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, s1 +// GFX1250: v_tanh_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xca,0xd5,0x01,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, s105 -// GFX1250: v_tanh_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xca,0xd5,0x69,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, s105 +// GFX1250: v_tanh_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xca,0xd5,0x69,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, vcc_lo -// GFX1250: v_tanh_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xca,0xd5,0x6a,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, vcc_lo +// GFX1250: v_tanh_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xca,0xd5,0x6a,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, vcc_hi -// GFX1250: v_tanh_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xca,0xd5,0x6b,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, vcc_hi +// GFX1250: v_tanh_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xca,0xd5,0x6b,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, ttmp15 -// GFX1250: v_tanh_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xca,0xd5,0x7b,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, ttmp15 +// GFX1250: v_tanh_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xca,0xd5,0x7b,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, m0 -// GFX1250: v_tanh_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xca,0xd5,0x7d,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, m0 +// GFX1250: v_tanh_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xca,0xd5,0x7d,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, exec_lo -// GFX1250: v_tanh_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xca,0xd5,0x7e,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, exec_lo +// GFX1250: v_tanh_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xca,0xd5,0x7e,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, exec_hi -// GFX1250: v_tanh_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xca,0xd5,0x7f,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, exec_hi +// GFX1250: v_tanh_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xca,0xd5,0x7f,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, null -// GFX1250: v_tanh_bf16_e64 v5, null ; encoding: [0x05,0x00,0xca,0xd5,0x7c,0x00,0x00,0x00] 
+v_tanh_bf16_e64 v5.l, null +// GFX1250: v_tanh_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xca,0xd5,0x7c,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, -1 -// GFX1250: v_tanh_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, -1 +// GFX1250: v_tanh_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_tanh_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08] +v_tanh_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_tanh_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08] -v_tanh_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_tanh_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10] +v_tanh_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_tanh_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10] -v_tanh_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_tanh_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_tanh_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_tanh_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_tanh_bf16 v5.l, v128.h // GFX1250: v_tanh_bf16_e64 v5.l, v128.h op_sel:[1,0] ; encoding: [0x05,0x08,0xca,0xd5,0x80,0x01,0x00,0x00] @@ -4000,347 +4000,347 @@ v_prng_b32_e64 v5, null v_prng_b32_e64 v5, -1 // GFX1250: v_prng_b32_e64 v5, -1 ; encoding: [0x05,0x00,0xcb,0xd5,0xc1,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, v1 -// GFX1250: v_rcp_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xf9,0xd5,0x01,0x01,0x00,0x00] +v_rcp_bf16_e64 v5.l, v1.l +// GFX1250: v_rcp_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xf9,0xd5,0x01,0x01,0x00,0x00] -v_rcp_bf16_e64 v5, v255 -// GFX1250: v_rcp_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xf9,0xd5,0xff,0x01,0x00,0x00] +v_rcp_bf16_e64 v5.l, v255.l +// GFX1250: v_rcp_bf16_e64 v5.l, v255.l ; encoding: 
[0x05,0x00,0xf9,0xd5,0xff,0x01,0x00,0x00] -v_rcp_bf16_e64 v5, s1 -// GFX1250: v_rcp_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xf9,0xd5,0x01,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, s1 +// GFX1250: v_rcp_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xf9,0xd5,0x01,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, s105 -// GFX1250: v_rcp_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xf9,0xd5,0x69,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, s105 +// GFX1250: v_rcp_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xf9,0xd5,0x69,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, vcc_lo -// GFX1250: v_rcp_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xf9,0xd5,0x6a,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, vcc_lo +// GFX1250: v_rcp_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xf9,0xd5,0x6a,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, vcc_hi -// GFX1250: v_rcp_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x6b,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, vcc_hi +// GFX1250: v_rcp_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x6b,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, ttmp15 -// GFX1250: v_rcp_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xf9,0xd5,0x7b,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, ttmp15 +// GFX1250: v_rcp_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xf9,0xd5,0x7b,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, m0 -// GFX1250: v_rcp_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xf9,0xd5,0x7d,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, m0 +// GFX1250: v_rcp_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xf9,0xd5,0x7d,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, exec_lo -// GFX1250: v_rcp_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xf9,0xd5,0x7e,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, exec_lo +// GFX1250: v_rcp_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xf9,0xd5,0x7e,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, exec_hi -// GFX1250: v_rcp_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x7f,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, exec_hi +// GFX1250: v_rcp_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x7f,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, null -// GFX1250: 
v_rcp_bf16_e64 v5, null ; encoding: [0x05,0x00,0xf9,0xd5,0x7c,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, null +// GFX1250: v_rcp_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xf9,0xd5,0x7c,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, -1 -// GFX1250: v_rcp_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, -1 +// GFX1250: v_rcp_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_rcp_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08] +v_rcp_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_rcp_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08] -v_rcp_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_rcp_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10] +v_rcp_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_rcp_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10] -v_rcp_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_rcp_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_rcp_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_rcp_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_rcp_bf16 v5.h, v128.h // GFX1250: v_rcp_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xf9,0xd5,0x80,0x01,0x00,0x00] -v_sqrt_bf16_e64 v5, v1 -// GFX1250: v_sqrt_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfa,0xd5,0x01,0x01,0x00,0x00] +v_sqrt_bf16_e64 v5.l, v1.l +// GFX1250: v_sqrt_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfa,0xd5,0x01,0x01,0x00,0x00] -v_sqrt_bf16_e64 v5, v255 -// GFX1250: v_sqrt_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfa,0xd5,0xff,0x01,0x00,0x00] +v_sqrt_bf16_e64 v5.l, v255.l +// GFX1250: v_sqrt_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfa,0xd5,0xff,0x01,0x00,0x00] -v_sqrt_bf16_e64 v5, s1 -// GFX1250: v_sqrt_bf16_e64 v5, s1 ; encoding: 
[0x05,0x00,0xfa,0xd5,0x01,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, s1 +// GFX1250: v_sqrt_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfa,0xd5,0x01,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, s105 -// GFX1250: v_sqrt_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfa,0xd5,0x69,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, s105 +// GFX1250: v_sqrt_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfa,0xd5,0x69,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, vcc_lo -// GFX1250: v_sqrt_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfa,0xd5,0x6a,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, vcc_lo +// GFX1250: v_sqrt_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfa,0xd5,0x6a,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, vcc_hi -// GFX1250: v_sqrt_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x6b,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, vcc_hi +// GFX1250: v_sqrt_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x6b,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, ttmp15 -// GFX1250: v_sqrt_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfa,0xd5,0x7b,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, ttmp15 +// GFX1250: v_sqrt_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfa,0xd5,0x7b,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, m0 -// GFX1250: v_sqrt_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfa,0xd5,0x7d,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, m0 +// GFX1250: v_sqrt_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfa,0xd5,0x7d,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, exec_lo -// GFX1250: v_sqrt_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfa,0xd5,0x7e,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, exec_lo +// GFX1250: v_sqrt_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xfa,0xd5,0x7e,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, exec_hi -// GFX1250: v_sqrt_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x7f,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, exec_hi +// GFX1250: v_sqrt_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x7f,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, null -// GFX1250: v_sqrt_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfa,0xd5,0x7c,0x00,0x00,0x00] 
+v_sqrt_bf16_e64 v5.l, null +// GFX1250: v_sqrt_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xfa,0xd5,0x7c,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, -1 -// GFX1250: v_sqrt_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, -1 +// GFX1250: v_sqrt_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_sqrt_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08] +v_sqrt_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_sqrt_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08] -v_sqrt_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_sqrt_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10] +v_sqrt_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_sqrt_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10] -v_sqrt_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_sqrt_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_sqrt_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_sqrt_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_sqrt_bf16 v5.h, v128.h // GFX1250: v_sqrt_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfa,0xd5,0x80,0x01,0x00,0x00] -v_rsq_bf16_e64 v5, v1 -// GFX1250: v_rsq_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfb,0xd5,0x01,0x01,0x00,0x00] +v_rsq_bf16_e64 v5.l, v1.l +// GFX1250: v_rsq_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfb,0xd5,0x01,0x01,0x00,0x00] -v_rsq_bf16_e64 v5, v255 -// GFX1250: v_rsq_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfb,0xd5,0xff,0x01,0x00,0x00] +v_rsq_bf16_e64 v5.l, v255.l +// GFX1250: v_rsq_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfb,0xd5,0xff,0x01,0x00,0x00] -v_rsq_bf16_e64 v5, s1 -// GFX1250: v_rsq_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xfb,0xd5,0x01,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, s1 +// 
GFX1250: v_rsq_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfb,0xd5,0x01,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, s105 -// GFX1250: v_rsq_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfb,0xd5,0x69,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, s105 +// GFX1250: v_rsq_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfb,0xd5,0x69,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, vcc_lo -// GFX1250: v_rsq_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfb,0xd5,0x6a,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, vcc_lo +// GFX1250: v_rsq_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfb,0xd5,0x6a,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, vcc_hi -// GFX1250: v_rsq_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x6b,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, vcc_hi +// GFX1250: v_rsq_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x6b,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, ttmp15 -// GFX1250: v_rsq_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfb,0xd5,0x7b,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, ttmp15 +// GFX1250: v_rsq_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfb,0xd5,0x7b,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, m0 -// GFX1250: v_rsq_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfb,0xd5,0x7d,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, m0 +// GFX1250: v_rsq_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfb,0xd5,0x7d,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, exec_lo -// GFX1250: v_rsq_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfb,0xd5,0x7e,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, exec_lo +// GFX1250: v_rsq_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xfb,0xd5,0x7e,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, exec_hi -// GFX1250: v_rsq_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x7f,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, exec_hi +// GFX1250: v_rsq_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x7f,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, null -// GFX1250: v_rsq_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfb,0xd5,0x7c,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, null +// GFX1250: v_rsq_bf16_e64 v5.l, null ; encoding: 
[0x05,0x00,0xfb,0xd5,0x7c,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, -1 -// GFX1250: v_rsq_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, -1 +// GFX1250: v_rsq_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_rsq_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08] +v_rsq_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_rsq_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08] -v_rsq_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_rsq_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10] +v_rsq_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_rsq_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10] -v_rsq_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_rsq_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_rsq_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_rsq_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_rsq_bf16 v5.h, v128.h // GFX1250: v_rsq_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfb,0xd5,0x80,0x01,0x00,0x00] -v_log_bf16_e64 v5, v1 -// GFX1250: v_log_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfc,0xd5,0x01,0x01,0x00,0x00] +v_log_bf16_e64 v5.l, v1.l +// GFX1250: v_log_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfc,0xd5,0x01,0x01,0x00,0x00] -v_log_bf16_e64 v5, v255 -// GFX1250: v_log_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfc,0xd5,0xff,0x01,0x00,0x00] +v_log_bf16_e64 v5.l, v255.l +// GFX1250: v_log_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfc,0xd5,0xff,0x01,0x00,0x00] -v_log_bf16_e64 v5, s1 -// GFX1250: v_log_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xfc,0xd5,0x01,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, s1 +// GFX1250: v_log_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfc,0xd5,0x01,0x00,0x00,0x00] -v_log_bf16_e64 
v5, s105 -// GFX1250: v_log_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfc,0xd5,0x69,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, s105 +// GFX1250: v_log_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfc,0xd5,0x69,0x00,0x00,0x00] -v_log_bf16_e64 v5, vcc_lo -// GFX1250: v_log_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfc,0xd5,0x6a,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, vcc_lo +// GFX1250: v_log_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfc,0xd5,0x6a,0x00,0x00,0x00] -v_log_bf16_e64 v5, vcc_hi -// GFX1250: v_log_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x6b,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, vcc_hi +// GFX1250: v_log_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x6b,0x00,0x00,0x00] -v_log_bf16_e64 v5, ttmp15 -// GFX1250: v_log_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfc,0xd5,0x7b,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, ttmp15 +// GFX1250: v_log_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfc,0xd5,0x7b,0x00,0x00,0x00] -v_log_bf16_e64 v5, m0 -// GFX1250: v_log_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfc,0xd5,0x7d,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, m0 +// GFX1250: v_log_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfc,0xd5,0x7d,0x00,0x00,0x00] -v_log_bf16_e64 v5, exec_lo -// GFX1250: v_log_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfc,0xd5,0x7e,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, exec_lo +// GFX1250: v_log_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xfc,0xd5,0x7e,0x00,0x00,0x00] -v_log_bf16_e64 v5, exec_hi -// GFX1250: v_log_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x7f,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, exec_hi +// GFX1250: v_log_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x7f,0x00,0x00,0x00] -v_log_bf16_e64 v5, null -// GFX1250: v_log_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfc,0xd5,0x7c,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, null +// GFX1250: v_log_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xfc,0xd5,0x7c,0x00,0x00,0x00] -v_log_bf16_e64 v5, -1 -// GFX1250: v_log_bf16_e64 v5, -1 ; encoding: 
[0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, -1 +// GFX1250: v_log_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00] -v_log_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_log_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08] +v_log_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_log_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08] -v_log_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_log_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10] +v_log_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_log_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10] -v_log_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_log_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_log_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_log_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_log_bf16 v5.h, v128.h // GFX1250: v_log_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfc,0xd5,0x80,0x01,0x00,0x00] -v_exp_bf16_e64 v5, v1 -// GFX1250: v_exp_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfd,0xd5,0x01,0x01,0x00,0x00] +v_exp_bf16_e64 v5.l, v1.l +// GFX1250: v_exp_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfd,0xd5,0x01,0x01,0x00,0x00] -v_exp_bf16_e64 v5, v255 -// GFX1250: v_exp_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfd,0xd5,0xff,0x01,0x00,0x00] +v_exp_bf16_e64 v5.l, v255.l +// GFX1250: v_exp_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfd,0xd5,0xff,0x01,0x00,0x00] -v_exp_bf16_e64 v5, s1 -// GFX1250: v_exp_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xfd,0xd5,0x01,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, s1 +// GFX1250: v_exp_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfd,0xd5,0x01,0x00,0x00,0x00] -v_exp_bf16_e64 v5, s105 -// GFX1250: v_exp_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfd,0xd5,0x69,0x00,0x00,0x00] 
+v_exp_bf16_e64 v5.l, s105 +// GFX1250: v_exp_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfd,0xd5,0x69,0x00,0x00,0x00] -v_exp_bf16_e64 v5, vcc_lo -// GFX1250: v_exp_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfd,0xd5,0x6a,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, vcc_lo +// GFX1250: v_exp_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfd,0xd5,0x6a,0x00,0x00,0x00] -v_exp_bf16_e64 v5, vcc_hi -// GFX1250: v_exp_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x6b,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, vcc_hi +// GFX1250: v_exp_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x6b,0x00,0x00,0x00] -v_exp_bf16_e64 v5, ttmp15 -// GFX1250: v_exp_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfd,0xd5,0x7b,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, ttmp15 +// GFX1250: v_exp_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfd,0xd5,0x7b,0x00,0x00,0x00] -v_exp_bf16_e64 v5, m0 -// GFX1250: v_exp_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfd,0xd5,0x7d,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, m0 +// GFX1250: v_exp_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfd,0xd5,0x7d,0x00,0x00,0x00] -v_exp_bf16_e64 v5, exec_lo -// GFX1250: v_exp_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfd,0xd5,0x7e,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, exec_lo +// GFX1250: v_exp_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xfd,0xd5,0x7e,0x00,0x00,0x00] -v_exp_bf16_e64 v5, exec_hi -// GFX1250: v_exp_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x7f,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, exec_hi +// GFX1250: v_exp_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x7f,0x00,0x00,0x00] -v_exp_bf16_e64 v5, null -// GFX1250: v_exp_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfd,0xd5,0x7c,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, null +// GFX1250: v_exp_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xfd,0xd5,0x7c,0x00,0x00,0x00] -v_exp_bf16_e64 v5, -1 -// GFX1250: v_exp_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, -1 +// GFX1250: v_exp_bf16_e64 v5.l, -1 ; encoding: 
[0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00] -v_exp_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_exp_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08] +v_exp_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_exp_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08] -v_exp_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_exp_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10] +v_exp_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_exp_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10] -v_exp_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_exp_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_exp_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_exp_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_exp_bf16 v5.h, v128.h // GFX1250: v_exp_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfd,0xd5,0x80,0x01,0x00,0x00] -v_sin_bf16_e64 v5, v1 -// GFX1250: v_sin_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfe,0xd5,0x01,0x01,0x00,0x00] +v_sin_bf16_e64 v5.l, v1.l +// GFX1250: v_sin_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfe,0xd5,0x01,0x01,0x00,0x00] -v_sin_bf16_e64 v5, v255 -// GFX1250: v_sin_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfe,0xd5,0xff,0x01,0x00,0x00] +v_sin_bf16_e64 v5.l, v255.l +// GFX1250: v_sin_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfe,0xd5,0xff,0x01,0x00,0x00] -v_sin_bf16_e64 v5, s1 -// GFX1250: v_sin_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xfe,0xd5,0x01,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, s1 +// GFX1250: v_sin_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfe,0xd5,0x01,0x00,0x00,0x00] -v_sin_bf16_e64 v5, s105 -// GFX1250: v_sin_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfe,0xd5,0x69,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, s105 +// GFX1250: v_sin_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfe,0xd5,0x69,0x00,0x00,0x00] 
-v_sin_bf16_e64 v5, vcc_lo -// GFX1250: v_sin_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfe,0xd5,0x6a,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, vcc_lo +// GFX1250: v_sin_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfe,0xd5,0x6a,0x00,0x00,0x00] -v_sin_bf16_e64 v5, vcc_hi -// GFX1250: v_sin_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x6b,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, vcc_hi +// GFX1250: v_sin_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x6b,0x00,0x00,0x00] -v_sin_bf16_e64 v5, ttmp15 -// GFX1250: v_sin_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfe,0xd5,0x7b,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, ttmp15 +// GFX1250: v_sin_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfe,0xd5,0x7b,0x00,0x00,0x00] -v_sin_bf16_e64 v5, m0 -// GFX1250: v_sin_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfe,0xd5,0x7d,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, m0 +// GFX1250: v_sin_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfe,0xd5,0x7d,0x00,0x00,0x00] -v_sin_bf16_e64 v5, exec_lo -// GFX1250: v_sin_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfe,0xd5,0x7e,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, exec_lo +// GFX1250: v_sin_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xfe,0xd5,0x7e,0x00,0x00,0x00] -v_sin_bf16_e64 v5, exec_hi -// GFX1250: v_sin_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x7f,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, exec_hi +// GFX1250: v_sin_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x7f,0x00,0x00,0x00] -v_sin_bf16_e64 v5, null -// GFX1250: v_sin_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfe,0xd5,0x7c,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, null +// GFX1250: v_sin_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xfe,0xd5,0x7c,0x00,0x00,0x00] -v_sin_bf16_e64 v5, -1 -// GFX1250: v_sin_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, -1 +// GFX1250: v_sin_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00] -v_sin_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_sin_bf16_e64 v5, 0.5 mul:2 ; 
encoding: [0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08] +v_sin_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_sin_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08] -v_sin_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_sin_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10] +v_sin_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_sin_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10] -v_sin_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_sin_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_sin_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_sin_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_sin_bf16 v5.h, v128.h // GFX1250: v_sin_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfe,0xd5,0x80,0x01,0x00,0x00] -v_cos_bf16_e64 v5, v1 -// GFX1250: v_cos_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xff,0xd5,0x01,0x01,0x00,0x00] +v_cos_bf16_e64 v5.l, v1.l +// GFX1250: v_cos_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xff,0xd5,0x01,0x01,0x00,0x00] -v_cos_bf16_e64 v5, v255 -// GFX1250: v_cos_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xff,0xd5,0xff,0x01,0x00,0x00] +v_cos_bf16_e64 v5.l, v255.l +// GFX1250: v_cos_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xff,0xd5,0xff,0x01,0x00,0x00] -v_cos_bf16_e64 v5, s1 -// GFX1250: v_cos_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xff,0xd5,0x01,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, s1 +// GFX1250: v_cos_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xff,0xd5,0x01,0x00,0x00,0x00] -v_cos_bf16_e64 v5, s105 -// GFX1250: v_cos_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xff,0xd5,0x69,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, s105 +// GFX1250: v_cos_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xff,0xd5,0x69,0x00,0x00,0x00] -v_cos_bf16_e64 v5, vcc_lo -// GFX1250: v_cos_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xff,0xd5,0x6a,0x00,0x00,0x00] 
+v_cos_bf16_e64 v5.l, vcc_lo +// GFX1250: v_cos_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xff,0xd5,0x6a,0x00,0x00,0x00] -v_cos_bf16_e64 v5, vcc_hi -// GFX1250: v_cos_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xff,0xd5,0x6b,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, vcc_hi +// GFX1250: v_cos_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xff,0xd5,0x6b,0x00,0x00,0x00] -v_cos_bf16_e64 v5, ttmp15 -// GFX1250: v_cos_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xff,0xd5,0x7b,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, ttmp15 +// GFX1250: v_cos_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xff,0xd5,0x7b,0x00,0x00,0x00] -v_cos_bf16_e64 v5, m0 -// GFX1250: v_cos_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xff,0xd5,0x7d,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, m0 +// GFX1250: v_cos_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xff,0xd5,0x7d,0x00,0x00,0x00] -v_cos_bf16_e64 v5, exec_lo -// GFX1250: v_cos_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xff,0xd5,0x7e,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, exec_lo +// GFX1250: v_cos_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xff,0xd5,0x7e,0x00,0x00,0x00] -v_cos_bf16_e64 v5, exec_hi -// GFX1250: v_cos_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xff,0xd5,0x7f,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, exec_hi +// GFX1250: v_cos_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xff,0xd5,0x7f,0x00,0x00,0x00] -v_cos_bf16_e64 v5, null -// GFX1250: v_cos_bf16_e64 v5, null ; encoding: [0x05,0x00,0xff,0xd5,0x7c,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, null +// GFX1250: v_cos_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xff,0xd5,0x7c,0x00,0x00,0x00] -v_cos_bf16_e64 v5, -1 -// GFX1250: v_cos_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, -1 +// GFX1250: v_cos_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00] -v_cos_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_cos_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08] +v_cos_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_cos_bf16_e64 v5.l, 0.5 
mul:2 ; encoding: [0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08] -v_cos_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_cos_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10] +v_cos_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_cos_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10] -v_cos_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_cos_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_cos_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_cos_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_cos_bf16_e64 v5.h, v128.h // GFX1250: v_cos_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xff,0xd5,0x80,0x01,0x00,0x00] -v_cvt_f32_bf16_e64 v5, v1 -// GFX1250: v_cvt_f32_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xf2,0xd5,0x01,0x01,0x00,0x00] +v_cvt_f32_bf16_e64 v5, v1.l +// GFX1250: v_cvt_f32_bf16_e64 v5, v1.l ; encoding: [0x05,0x00,0xf2,0xd5,0x01,0x01,0x00,0x00] -v_cvt_f32_bf16_e64 v5, v255 -// GFX1250: v_cvt_f32_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xf2,0xd5,0xff,0x01,0x00,0x00] +v_cvt_f32_bf16_e64 v5, v255.l +// GFX1250: v_cvt_f32_bf16_e64 v5, v255.l ; encoding: [0x05,0x00,0xf2,0xd5,0xff,0x01,0x00,0x00] v_cvt_f32_bf16_e64 v5, s1 // GFX1250: v_cvt_f32_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xf2,0xd5,0x01,0x00,0x00,0x00] @@ -4372,11 +4372,11 @@ v_cvt_f32_bf16_e64 v5, null v_cvt_f32_bf16_e64 v5, -1 // GFX1250: v_cvt_f32_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xf2,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_f32_bf16_e64 v5, v1 op_sel:[1] -// GFX1250: v_cvt_f32_bf16_e64 v5, v1 op_sel:[1,0] ; encoding: [0x05,0x08,0xf2,0xd5,0x01,0x01,0x00,0x00] +v_cvt_f32_bf16_e64 v5, v1.h op_sel:[1,0] +// GFX1250: v_cvt_f32_bf16_e64 v5, v1.h op_sel:[1,0] ; encoding: [0x05,0x08,0xf2,0xd5,0x01,0x01,0x00,0x00] -v_cvt_f32_bf16_e64 v5, v255 op_sel:[1] -// GFX1250: v_cvt_f32_bf16_e64 v5, v255 op_sel:[1,0] ; 
encoding: [0x05,0x08,0xf2,0xd5,0xff,0x01,0x00,0x00] +v_cvt_f32_bf16_e64 v5, v255.h op_sel:[1,0] +// GFX1250: v_cvt_f32_bf16_e64 v5, v255.h op_sel:[1,0] ; encoding: [0x05,0x08,0xf2,0xd5,0xff,0x01,0x00,0x00] v_cvt_f32_bf16_e64 v5, s1 op_sel:[1] // GFX1250: v_cvt_f32_bf16_e64 v5, s1 op_sel:[1,0] ; encoding: [0x05,0x08,0xf2,0xd5,0x01,0x00,0x00,0x00] @@ -4492,32 +4492,32 @@ v_cvt_pk_f16_fp8 v1, v150 op_sel:[1] v_cvt_pk_f16_fp8 v1, s2 op_sel:[1] // GFX1250: v_cvt_pk_f16_fp8 v1, s2 op_sel:[1,0] ; encoding: [0x01,0x08,0xf5,0xd5,0x02,0x00,0x00,0x00] -v_sat_pk4_i4_i8 v150, v2 -// GFX1250: v_sat_pk4_i4_i8_e64 v150, v2 ; encoding: [0x96,0x00,0xf3,0xd5,0x02,0x01,0x00,0x00] +v_sat_pk4_i4_i8 v150.l, v2 +// GFX1250: v_sat_pk4_i4_i8_e64 v150.l, v2 ; encoding: [0x96,0x00,0xf3,0xd5,0x02,0x01,0x00,0x00] -v_sat_pk4_i4_i8 v150, s2 -// GFX1250: v_sat_pk4_i4_i8_e64 v150, s2 ; encoding: [0x96,0x00,0xf3,0xd5,0x02,0x00,0x00,0x00] +v_sat_pk4_i4_i8 v150.l, s2 +// GFX1250: v_sat_pk4_i4_i8_e64 v150.l, s2 ; encoding: [0x96,0x00,0xf3,0xd5,0x02,0x00,0x00,0x00] -v_sat_pk4_i4_i8 v150, 2 -// GFX1250: v_sat_pk4_i4_i8_e64 v150, 2 ; encoding: [0x96,0x00,0xf3,0xd5,0x82,0x00,0x00,0x00] +v_sat_pk4_i4_i8 v150.l, 2 +// GFX1250: v_sat_pk4_i4_i8_e64 v150.l, 2 ; encoding: [0x96,0x00,0xf3,0xd5,0x82,0x00,0x00,0x00] -v_sat_pk4_i4_i8 v150, 0x1234 -// GFX1250: v_sat_pk4_i4_i8_e64 v150, 0x1234 ; encoding: [0x96,0x00,0xf3,0xd5,0xff,0x00,0x00,0x00,0x34,0x12,0x00,0x00] +v_sat_pk4_i4_i8 v150.l, 0x1234 +// GFX1250: v_sat_pk4_i4_i8_e64 v150.l, 0x1234 ; encoding: [0x96,0x00,0xf3,0xd5,0xff,0x00,0x00,0x00,0x34,0x12,0x00,0x00] v_sat_pk4_i4_i8 v150.h, v2 // GFX1250: v_sat_pk4_i4_i8_e64 v150.h, v2 op_sel:[0,1] ; encoding: [0x96,0x40,0xf3,0xd5,0x02,0x01,0x00,0x00] -v_sat_pk4_u4_u8 v150, v2 -// GFX1250: v_sat_pk4_u4_u8_e64 v150, v2 ; encoding: [0x96,0x00,0xf4,0xd5,0x02,0x01,0x00,0x00] +v_sat_pk4_u4_u8 v150.l, v2 +// GFX1250: v_sat_pk4_u4_u8_e64 v150.l, v2 ; encoding: [0x96,0x00,0xf4,0xd5,0x02,0x01,0x00,0x00] -v_sat_pk4_u4_u8 v150, 
s2 -// GFX1250: v_sat_pk4_u4_u8_e64 v150, s2 ; encoding: [0x96,0x00,0xf4,0xd5,0x02,0x00,0x00,0x00] +v_sat_pk4_u4_u8 v150.l, s2 +// GFX1250: v_sat_pk4_u4_u8_e64 v150.l, s2 ; encoding: [0x96,0x00,0xf4,0xd5,0x02,0x00,0x00,0x00] -v_sat_pk4_u4_u8 v150, 2 -// GFX1250: v_sat_pk4_u4_u8_e64 v150, 2 ; encoding: [0x96,0x00,0xf4,0xd5,0x82,0x00,0x00,0x00] +v_sat_pk4_u4_u8 v150.l, 2 +// GFX1250: v_sat_pk4_u4_u8_e64 v150.l, 2 ; encoding: [0x96,0x00,0xf4,0xd5,0x82,0x00,0x00,0x00] -v_sat_pk4_u4_u8 v150, 0x1234 -// GFX1250: v_sat_pk4_u4_u8_e64 v150, 0x1234 ; encoding: [0x96,0x00,0xf4,0xd5,0xff,0x00,0x00,0x00,0x34,0x12,0x00,0x00] +v_sat_pk4_u4_u8 v150.l, 0x1234 +// GFX1250: v_sat_pk4_u4_u8_e64 v150.l, 0x1234 ; encoding: [0x96,0x00,0xf4,0xd5,0xff,0x00,0x00,0x00,0x34,0x12,0x00,0x00] v_sat_pk4_u4_u8 v150.h, v2 // GFX1250: v_sat_pk4_u4_u8_e64 v150.h, v2 op_sel:[0,1] ; encoding: [0x96,0x40,0xf4,0xd5,0x02,0x01,0x00,0x00] diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s index f14705f..d163856 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s @@ -58,120 +58,120 @@ v_tanh_f32_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask // GFX1250: v_tanh_f32_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x9e,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_tanh_f16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: 
instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_tanh_f16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_mirror -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_shl:15 -// GFX1250: 
v_tanh_f16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, 
v1.l row_ror:15 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_tanh_f16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_tanh_f16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 
bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_tanh_f16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x9f,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_tanh_f16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_tanh_f16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x9f,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_f16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_tanh_f16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; 
encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_shr:1 +// 
GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; 
encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_bf16_e64_dpp v5.h, v128.h 
quad_perm:[3,2,1,0] @@ -222,468 +222,468 @@ v_prng_b32_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf // GFX1250: v_prng_b32_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xcb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] 
+v_rcp_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: 
[0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rcp_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_rcp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l 
quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU 
-v_sqrt_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 
; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sqrt_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_sqrt_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU 
-v_rsq_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 
bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rsq_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_rsq_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_log_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_log_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_log_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_log_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: 
v_log_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] 
+v_log_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_log_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 
row_mask:0x0 bank_mask:0x1 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_log_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_log_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_log_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_log_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_exp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_exp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_exp_bf16_e64_dpp 
v5.l, v1.l row_shl:1 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_exp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_exp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 
bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_exp_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_exp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_sin_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_sin_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: 
v_sin_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 
row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_sin_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_sin_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 
row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sin_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_sin_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_cos_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_cos_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: 
:[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 
row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: 
[0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_cos_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_cos_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cos_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_cos_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xff,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: 
v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_mirror +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_half_mirror +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_cvt_f32_bf16_e64_dpp 
v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_shl:1 +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_shl:15 +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_shr:1 +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_shr:15 +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_ror:1 +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_ror:15 +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_f32_bf16_e64_dpp v5, v128.h quad_perm:[3,2,1,0] @@ -766,24 +766,24 @@ v_cvt_pk_f16_fp8 v1, v128.h quad_perm:[0,1,2,3] // GFX1250: v_cvt_pk_f16_fp8_e64_dpp v1, v128.h op_sel:[1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x08,0xf5,0xd5,0xfa,0x00,0x00,0x00,0x80,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v150, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf -// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x00,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff] +v_sat_pk4_i4_i8 v150.l, v2 
quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf +// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x00,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v150, v2 row_share:1 fi:1 -// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150, v2 row_share:1 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x96,0x00,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x51,0x05,0xff] +v_sat_pk4_i4_i8 v150.l, v2 row_share:1 fi:1 +// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.l, v2 row_share:1 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x96,0x00,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x51,0x05,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_i4_i8 v150.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf // GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.h, v2 op_sel:[0,1] quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x40,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v150, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf -// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x00,0xf4,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff] +v_sat_pk4_u4_u8 v150.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf +// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x00,0xf4,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v150, v2 row_share:1 fi:1 -// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150, v2 row_share:1 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x96,0x00,0xf4,0xd5,0xfa,0x00,0x00,0x00,0x02,0x51,0x05,0xff] +v_sat_pk4_u4_u8 v150.l, v2 row_share:1 fi:1 +// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150.l, v2 row_share:1 row_mask:0xf 
bank_mask:0xf fi:1 ; encoding: [0x96,0x00,0xf4,0xd5,0xfa,0x00,0x00,0x00,0x02,0x51,0x05,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_u4_u8 v150.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s index 0414421..6ec4d5f 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s @@ -18,40 +18,40 @@ v_tanh_f32_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 // GFX1250: v_tanh_f32_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x9e,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_tanh_f16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x9f,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_tanh_f16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x9f,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_tanh_f16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x9f,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_tanh_f16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x9f,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x9f,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_tanh_f16_e64_dpp 
v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x9f,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_tanh_f16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x9f,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_tanh_f16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_tanh_f16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x9f,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_f16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_tanh_f16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0x9f,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_tanh_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: 
instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] @@ -62,140 +62,140 @@ v_prng_b32_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_prng_b32_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xcb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_rcp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:2 
dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rcp_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_rcp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_sqrt_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l 
dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sqrt_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_sqrt_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU 
-v_rsq_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_rsq_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rsq_bf16_e64_dpp v5.h, 
v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_rsq_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_log_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_log_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_log_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_log_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_log_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 
+// GFX1250: v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_log_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_log_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_exp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_exp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_exp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_exp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 
dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_exp_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_exp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sin_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_sin_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_sin_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_sin_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: 
v_sin_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sin_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_sin_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_cos_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_cos_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_cos_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 
mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_cos_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cos_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] @@ -262,8 +262,8 @@ v_cvt_f16_fp8 v128.l, v2 dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_cvt_f16_fp8_e64_dpp v128.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x80,0x00,0xf7,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf2,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_cvt_f32_bf16_e64_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf2,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_f32_bf16_e64_dpp v5, v128.h dpp8:[7,6,5,4,3,2,1,0] @@ -298,24 +298,24 @@ v_cvt_pk_f16_fp8 v1, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_cvt_pk_f16_fp8_e64_dpp v1, v128.h 
op_sel:[1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x08,0xf5,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v150, v2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x00,0xf3,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05] +v_sat_pk4_i4_i8 v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x00,0xf3,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v150, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x96,0x00,0xf3,0xd5,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05] +v_sat_pk4_i4_i8 v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x96,0x00,0xf3,0xd5,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_i4_i8 v150.h, v2 dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x40,0xf3,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v150, v2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x00,0xf4,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05] +v_sat_pk4_u4_u8 v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x00,0xf4,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v150, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150, v2 dpp8:[7,6,5,4,3,2,1,0] 
fi:1 ; encoding: [0x96,0x00,0xf4,0xd5,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05] +v_sat_pk4_u4_u8 v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x96,0x00,0xf4,0xd5,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_u4_u8 v150.h, v2 dpp8:[7,6,5,4,3,2,1,0] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt index 07dbbdd..94edf22 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt @@ -720,10 +720,12 @@ # GFX1250: v_cvt_f32_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xe4,0x0a,0x7e] 0x01,0xe5,0x0a,0x7e -# GFX1250: v_cvt_f32_bf16_e32 v5, v1.l ; encoding: [0x01,0xe5,0x0a,0x7e] +# GFX1250-REAL16: v_cvt_f32_bf16_e32 v5, v1.l ; encoding: [0x01,0xe5,0x0a,0x7e] +# GFX1250-FAKE16: v_cvt_f32_bf16_e32 v5, v1 ; encoding: [0x01,0xe5,0x0a,0x7e] 0x7f,0xe5,0x0a,0x7e -# GFX1250: v_cvt_f32_bf16_e32 v5, v127.l ; encoding: [0x7f,0xe5,0x0a,0x7e] +# GFX1250-REAL16: v_cvt_f32_bf16_e32 v5, v127.l ; encoding: [0x7f,0xe5,0x0a,0x7e] +# GFX1250-FAKE16: v_cvt_f32_bf16_e32 v5, v127 ; encoding: [0x7f,0xe5,0x0a,0x7e] 0x6b,0xe4,0x0a,0x7e # GFX1250: v_cvt_f32_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xe4,0x0a,0x7e] @@ -732,7 +734,8 @@ # GFX1250: v_cvt_f32_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xe4,0x0a,0x7e] 0x81,0xe5,0x0a,0x7e -# GFX1250: v_cvt_f32_bf16_e32 v5, v1.h ; encoding: [0x81,0xe5,0x0a,0x7e] +# GFX1250-REAL16: v_cvt_f32_bf16_e32 v5, v1.h ; encoding: [0x81,0xe5,0x0a,0x7e] +# GFX1250-FAKE16: v_cvt_f32_bf16_e32 v5, v129/*Invalid register, operand has 'VS_32_Lo128' register class*/ ; encoding: [0x81,0xe5,0x0a,0x7e] 0xff,0xf0,0x02,0x7e,0x34,0x12,0x00,0x00 # GFX1250-REAL16: v_cvt_f16_bf8_e32 v1.l, 0x1234 ; encoding: [0xff,0xf0,0x02,0x7e,0x34,0x12,0x00,0x00] diff --git 
a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt index c12ecb8..93286ca 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt @@ -615,49 +615,64 @@ # GFX1250-REAL16: v_cos_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7f,0x81,0x1b,0x00,0xff] 0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30 -# GFX1250: v_cvt_f32_bf16_dpp v127, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v127, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30] 0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_half_mirror 
row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: 
[0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01 -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01] 0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff] 
0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13 -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13] 0xfa,0xe4,0x0a,0x7e,0x81,0x1b,0x00,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x81,0x1b,0x00,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x81,0x1b,0x00,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v129/*Invalid register, operand has 'VGPR_32_Lo128' register class*/ quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x81,0x1b,0x00,0xff] 0xfa,0xf0,0x02,0x7e,0x02,0x39,0x00,0xff # GFX1250-REAL16: v_cvt_f16_bf8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf0,0x02,0x7e,0x02,0x39,0x00,0xff] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp8.txt index fa7b940..fb3f1b2 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp8.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp8.txt @@ -165,16 +165,20 @@ # 
GFX1250-FAKE16: v_add_f64_e32 v[156:157], v[129:130], v[187:188] ; encoding: [0x81,0x77,0x39,0x05] 0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00 -# GFX1250: v_cvt_f32_bf16_dpp v127, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v127, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00] 0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05 -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] 0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05 -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] 0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05 -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v129/*Invalid register, operand has 'VGPR_32_Lo128' register class*/ dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05] 0xe9,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05 # GFX1250-REAL16: v_cvt_f16_bf8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05] diff --git 
a/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll b/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll index cba1ba8..ad05684 100644 --- a/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll +++ b/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll @@ -304,32 +304,43 @@ end: define void @pr106083_invalidBBarg_fold(i1 %cmp1, i1 %cmp2, i1 %not, ptr %d) { ; CHECK-LABEL: @pr106083_invalidBBarg_fold( ; CHECK-NEXT: bb: -; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[BB1:%.*]], label [[SEL_SI_UNFOLD_FALSE:%.*]] -; CHECK: sel.si.unfold.false: -; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 1, [[BB:%.*]] ] -; CHECK-NEXT: br label [[BB1]] +; CHECK-NEXT: br label [[BB1:%.*]] ; CHECK: BB1: -; CHECK-NEXT: [[I:%.*]] = phi i16 [ 0, [[BB1_BACKEDGE:%.*]] ], [ 0, [[BB]] ], [ 1, [[BB7:%.*]] ], [ 0, [[SEL_SI_UNFOLD_FALSE]] ], [ 1, [[BB7_JT0:%.*]] ] -; CHECK-NEXT: [[SEL_SI_UNFOLD_PHI:%.*]] = phi i32 [ [[SEL_SI_UNFOLD_PHI]], [[BB1_BACKEDGE]] ], [ [[SEL_SI_UNFOLD_PHI]], [[BB7]] ], [ 0, [[BB]] ], [ [[DOTSI_UNFOLD_PHI1]], [[SEL_SI_UNFOLD_FALSE]] ], [ [[SEL_SI_UNFOLD_PHI]], [[BB7_JT0]] ] +; CHECK-NEXT: [[I:%.*]] = phi i16 [ 0, [[BB1_BACKEDGE:%.*]] ], [ 0, [[BB:%.*]] ], [ 1, [[BB9:%.*]] ], [ 1, [[BB7_JT0:%.*]] ] ; CHECK-NEXT: br i1 [[NOT:%.*]], label [[BB7_JT0]], label [[BB2:%.*]] ; CHECK: BB2: ; CHECK-NEXT: store i16 0, ptr [[D:%.*]], align 2 -; CHECK-NEXT: br i1 [[CMP2:%.*]], label [[BB7]], label [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0:%.*]] +; CHECK-NEXT: br i1 [[CMP2:%.*]], label [[BB7:%.*]], label [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0:%.*]] ; CHECK: spec.select.si.unfold.false: -; CHECK-NEXT: br label [[BB7]] +; CHECK-NEXT: br label [[BB9]] ; CHECK: spec.select.si.unfold.false.jt0: ; CHECK-NEXT: [[DOTSI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ 0, [[BB2]] ] ; CHECK-NEXT: br label [[BB7_JT0]] +; CHECK: sel.si.unfold.true: +; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[BB9]], label [[SEL_SI_UNFOLD_FALSE_JT1:%.*]] +; CHECK: 
sel.si.unfold.true.jt0: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 0, [[BB2]] ] +; CHECK-NEXT: br i1 [[CMP1]], label [[BB7_JT0]], label [[SEL_SI_UNFOLD_FALSE:%.*]] +; CHECK: sel.si.unfold.false: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2:%.*]] = phi i32 [ 1, [[BB7]] ] +; CHECK-NEXT: br label [[BB9]] +; CHECK: sel.si.unfold.false.jt1: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2_JT1:%.*]] = phi i32 [ 1, [[SEL_SI_UNFOLD_TRUE:%.*]] ] +; CHECK-NEXT: br label [[BB7_JT1:%.*]] ; CHECK: BB7: -; CHECK-NEXT: [[D_PROMOTED4:%.*]] = phi i16 [ 1, [[BB2]] ], [ 1, [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]] ] -; CHECK-NEXT: [[_3:%.*]] = phi i32 [ [[SEL_SI_UNFOLD_PHI]], [[BB2]] ], [ poison, [[SPEC_SELECT_SI_UNFOLD_FALSE]] ] +; CHECK-NEXT: [[D_PROMOTED4:%.*]] = phi i16 [ 1, [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]] ], [ 1, [[SEL_SI_UNFOLD_TRUE]] ], [ 1, [[SEL_SI_UNFOLD_FALSE]] ] +; CHECK-NEXT: [[_3:%.*]] = phi i32 [ poison, [[SPEC_SELECT_SI_UNFOLD_FALSE]] ], [ poison, [[SEL_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI2]], [[SEL_SI_UNFOLD_FALSE]] ] ; CHECK-NEXT: switch i32 [[_3]], label [[BB1_BACKEDGE]] [ ; CHECK-NEXT: i32 0, label [[BB1]] ; CHECK-NEXT: i32 1, label [[BB8:%.*]] ; CHECK-NEXT: ] +; CHECK: BB7.jt1: +; CHECK-NEXT: [[D_PROMOTED4_JT1:%.*]] = phi i16 [ 1, [[SEL_SI_UNFOLD_FALSE_JT1]] ] +; CHECK-NEXT: [[_3_JT1:%.*]] = phi i32 [ [[DOTSI_UNFOLD_PHI2_JT1]], [[SEL_SI_UNFOLD_FALSE_JT1]] ] +; CHECK-NEXT: br label [[BB8]] ; CHECK: BB7.jt0: -; CHECK-NEXT: [[D_PROMOTED4_JT0:%.*]] = phi i16 [ 0, [[BB1]] ], [ 1, [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0]] ] -; CHECK-NEXT: [[_3_JT0:%.*]] = phi i32 [ 0, [[BB1]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0]] ] +; CHECK-NEXT: [[D_PROMOTED4_JT0:%.*]] = phi i16 [ 0, [[BB1]] ], [ 1, [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0]] ], [ 1, [[BB7]] ] +; CHECK-NEXT: [[_3_JT0:%.*]] = phi i32 [ 0, [[BB1]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0]] ], [ [[DOTSI_UNFOLD_PHI1]], [[BB7]] ] ; CHECK-NEXT: br label [[BB1]] ; CHECK: BB1.backedge: ; CHECK-NEXT: 
br label [[BB1]] @@ -367,30 +378,40 @@ BB8: ; preds = %BB7 define void @pr106083_select_dead_uses(i1 %cmp1, i1 %not, ptr %p) { ; CHECK-LABEL: @pr106083_select_dead_uses( ; CHECK-NEXT: bb: -; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[DOTLOOPEXIT6:%.*]], label [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]] -; CHECK: spec.select.si.unfold.false: -; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 1, [[BB:%.*]] ] -; CHECK-NEXT: br label [[DOTLOOPEXIT6]] +; CHECK-NEXT: br label [[DOTLOOPEXIT6:%.*]] ; CHECK: .loopexit6: -; CHECK-NEXT: [[SPEC_SELECT_SI_UNFOLD_PHI:%.*]] = phi i32 [ [[SPEC_SELECT_SI_UNFOLD_PHI]], [[SELECT_UNFOLD:%.*]] ], [ 0, [[BB]] ], [ [[DOTSI_UNFOLD_PHI1]], [[SPEC_SELECT_SI_UNFOLD_FALSE]] ] ; CHECK-NEXT: br i1 [[NOT:%.*]], label [[SELECT_UNFOLD_JT0:%.*]], label [[BB1:%.*]] ; CHECK: bb1: ; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[P:%.*]], align 4 ; CHECK-NEXT: [[NOT2:%.*]] = icmp eq i32 0, 0 -; CHECK-NEXT: br i1 [[NOT2]], label [[SELECT_UNFOLD]], label [[SPEC_SELECT7_SI_UNFOLD_FALSE_JT0:%.*]] +; CHECK-NEXT: br i1 [[NOT2]], label [[SELECT_UNFOLD:%.*]], label [[SPEC_SELECT7_SI_UNFOLD_FALSE_JT0:%.*]] ; CHECK: spec.select7.si.unfold.false: -; CHECK-NEXT: br label [[SELECT_UNFOLD]] +; CHECK-NEXT: br label [[SELECT_UNFOLD1:%.*]] ; CHECK: spec.select7.si.unfold.false.jt0: ; CHECK-NEXT: [[DOTSI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ 0, [[BB1]] ] ; CHECK-NEXT: br label [[SELECT_UNFOLD_JT0]] +; CHECK: spec.select.si.unfold.true: +; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[SELECT_UNFOLD1]], label [[SPEC_SELECT_SI_UNFOLD_FALSE_JT1:%.*]] +; CHECK: spec.select.si.unfold.true.jt0: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 0, [[BB1]] ] +; CHECK-NEXT: br i1 [[CMP1]], label [[SELECT_UNFOLD_JT0]], label [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]] +; CHECK: spec.select.si.unfold.false: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2:%.*]] = phi i32 [ 1, [[SELECT_UNFOLD]] ] +; CHECK-NEXT: br label [[SELECT_UNFOLD1]] +; CHECK: spec.select.si.unfold.false.jt1: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2_JT1:%.*]] = 
phi i32 [ 1, [[SPEC_SELECT_SI_UNFOLD_TRUE:%.*]] ] +; CHECK-NEXT: br label [[SELECT_UNFOLD_JT1:%.*]] ; CHECK: select.unfold: -; CHECK-NEXT: [[_2:%.*]] = phi i32 [ [[SPEC_SELECT_SI_UNFOLD_PHI]], [[BB1]] ], [ poison, [[SPEC_SELECT7_SI_UNFOLD_FALSE:%.*]] ] +; CHECK-NEXT: [[_2:%.*]] = phi i32 [ poison, [[SPEC_SELECT7_SI_UNFOLD_FALSE:%.*]] ], [ poison, [[SPEC_SELECT_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI2]], [[SPEC_SELECT_SI_UNFOLD_FALSE]] ] ; CHECK-NEXT: switch i32 [[_2]], label [[BB2:%.*]] [ ; CHECK-NEXT: i32 0, label [[DOTPREHEADER_PREHEADER:%.*]] ; CHECK-NEXT: i32 1, label [[DOTLOOPEXIT6]] ; CHECK-NEXT: ] +; CHECK: select.unfold.jt1: +; CHECK-NEXT: [[_2_JT1:%.*]] = phi i32 [ [[DOTSI_UNFOLD_PHI2_JT1]], [[SPEC_SELECT_SI_UNFOLD_FALSE_JT1]] ] +; CHECK-NEXT: br label [[DOTLOOPEXIT6]] ; CHECK: select.unfold.jt0: -; CHECK-NEXT: [[_2_JT0:%.*]] = phi i32 [ 0, [[DOTLOOPEXIT6]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT7_SI_UNFOLD_FALSE_JT0]] ] +; CHECK-NEXT: [[_2_JT0:%.*]] = phi i32 [ 0, [[DOTLOOPEXIT6]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT7_SI_UNFOLD_FALSE_JT0]] ], [ [[DOTSI_UNFOLD_PHI1]], [[SELECT_UNFOLD]] ] ; CHECK-NEXT: br label [[DOTPREHEADER_PREHEADER]] ; CHECK: .preheader.preheader: ; CHECK-NEXT: ret void diff --git a/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll b/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll index 93872c3..663f459 100644 --- a/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll +++ b/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll @@ -463,3 +463,87 @@ unreachable: sw.bb: ; preds = %if.end br label %while.cond } + +define i16 @pr160250() { +; CHECK-LABEL: @pr160250( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[FOR_COND48:%.*]] +; CHECK: for.cond48: +; CHECK-NEXT: br i1 false, label [[CLEANUP87_JT0:%.*]], label [[IF_ELSE:%.*]] +; CHECK: if.else: +; CHECK-NEXT: br i1 false, label [[DOT6_SI_UNFOLD_TRUE:%.*]], label [[DOT5_SI_UNFOLD_TRUE:%.*]] +; CHECK: .5.si.unfold.true: +; CHECK-NEXT: br i1 
false, label [[SPEC_SELECT1_SI_UNFOLD_TRUE1:%.*]], label [[DOT5_SI_UNFOLD_FALSE_JT0:%.*]] +; CHECK: .5.si.unfold.true.jt0: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 0, [[IF_ELSE]] ] +; CHECK-NEXT: br i1 false, label [[SPEC_SELECT1_SI_UNFOLD_TRUE:%.*]], label [[DOT5_SI_UNFOLD_FALSE:%.*]] +; CHECK: .5.si.unfold.false: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2:%.*]] = phi i32 [ 0, [[DOT5_SI_UNFOLD_TRUE]] ] +; CHECK-NEXT: br label [[SPEC_SELECT1_SI_UNFOLD_TRUE1]] +; CHECK: .5.si.unfold.false.jt0: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2_JT0:%.*]] = phi i32 [ 0, [[DOT5_SI_UNFOLD_TRUE1:%.*]] ] +; CHECK-NEXT: br label [[SPEC_SELECT1_SI_UNFOLD_TRUE]] +; CHECK: spec.select1.si.unfold.true: +; CHECK-NEXT: [[DOT5_SI_UNFOLD_PHI:%.*]] = phi i32 [ poison, [[DOT5_SI_UNFOLD_TRUE1]] ], [ [[DOTSI_UNFOLD_PHI2]], [[DOT5_SI_UNFOLD_FALSE]] ] +; CHECK-NEXT: br i1 false, label [[SPEC_SELECT_SI_UNFOLD_FALSE1:%.*]], label [[SPEC_SELECT1_SI_UNFOLD_FALSE_JT1:%.*]] +; CHECK: spec.select1.si.unfold.true.jt0: +; CHECK-NEXT: [[DOT5_SI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ [[DOTSI_UNFOLD_PHI1]], [[DOT5_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI2_JT0]], [[DOT5_SI_UNFOLD_FALSE_JT0]] ] +; CHECK-NEXT: br i1 false, label [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]], label [[SPEC_SELECT1_SI_UNFOLD_FALSE_JT0:%.*]] +; CHECK: spec.select1.si.unfold.false: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI:%.*]] = phi i32 [ 0, [[SPEC_SELECT1_SI_UNFOLD_TRUE]] ] +; CHECK-NEXT: br label [[SPEC_SELECT_SI_UNFOLD_FALSE1]] +; CHECK: spec.select1.si.unfold.false.jt0: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ 0, [[SPEC_SELECT1_SI_UNFOLD_TRUE1]] ] +; CHECK-NEXT: br label [[SPEC_SELECT_SI_UNFOLD_FALSE]] +; CHECK: spec.select.si.unfold.false: +; CHECK-NEXT: [[SPEC_SELECT1_SI_UNFOLD_PHI:%.*]] = phi i32 [ [[DOT5_SI_UNFOLD_PHI]], [[SPEC_SELECT1_SI_UNFOLD_TRUE1]] ], [ [[DOTSI_UNFOLD_PHI]], [[SPEC_SELECT1_SI_UNFOLD_FALSE_JT0]] ] +; CHECK-NEXT: br label [[CLEANUP87:%.*]] +; CHECK: spec.select.si.unfold.false.jt0: +; CHECK-NEXT: 
[[SPEC_SELECT1_SI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ [[DOT5_SI_UNFOLD_PHI_JT0]], [[SPEC_SELECT1_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT1_SI_UNFOLD_FALSE_JT1]] ] +; CHECK-NEXT: br label [[CLEANUP87_JT0]] +; CHECK: .6.si.unfold.true: +; CHECK-NEXT: br i1 false, label [[CLEANUP87]], label [[DOT6_SI_UNFOLD_FALSE_JT0:%.*]] +; CHECK: .6.si.unfold.true.jt0: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI3:%.*]] = phi i32 [ 0, [[IF_ELSE]] ] +; CHECK-NEXT: br i1 false, label [[CLEANUP87_JT0]], label [[DOT6_SI_UNFOLD_FALSE:%.*]] +; CHECK: .6.si.unfold.false: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI4:%.*]] = phi i32 [ 0, [[DOT6_SI_UNFOLD_TRUE]] ] +; CHECK-NEXT: br label [[CLEANUP87]] +; CHECK: .6.si.unfold.false.jt0: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI4_JT0:%.*]] = phi i32 [ 0, [[DOT6_SI_UNFOLD_TRUE1:%.*]] ] +; CHECK-NEXT: br label [[CLEANUP87_JT0]] +; CHECK: cleanup87: +; CHECK-NEXT: [[CLEANUP_DEST_SLOT_3:%.*]] = phi i32 [ [[SPEC_SELECT1_SI_UNFOLD_PHI]], [[SPEC_SELECT_SI_UNFOLD_FALSE1]] ], [ poison, [[DOT6_SI_UNFOLD_TRUE1]] ], [ [[DOTSI_UNFOLD_PHI4]], [[DOT6_SI_UNFOLD_FALSE]] ] +; CHECK-NEXT: switch i32 [[CLEANUP_DEST_SLOT_3]], label [[FOR_COND48_BACKEDGE:%.*]] [ +; CHECK-NEXT: i32 0, label [[FOR_COND48_BACKEDGE]] +; CHECK-NEXT: i32 1, label [[FOR_COND48_BACKEDGE]] +; CHECK-NEXT: ] +; CHECK: cleanup87.jt0: +; CHECK-NEXT: [[CLEANUP_DEST_SLOT_3_JT0:%.*]] = phi i32 [ 0, [[FOR_COND48]] ], [ [[SPEC_SELECT1_SI_UNFOLD_PHI_JT0]], [[SPEC_SELECT_SI_UNFOLD_FALSE]] ], [ [[DOTSI_UNFOLD_PHI3]], [[DOT6_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI4_JT0]], [[DOT6_SI_UNFOLD_FALSE_JT0]] ] +; CHECK-NEXT: br label [[FOR_COND48_BACKEDGE]] +; CHECK: for.cond48.backedge: +; CHECK-NEXT: br label [[FOR_COND48]] +; +entry: + %.5 = select i1 false, i32 0, i32 0 + %.6 = select i1 false, i32 0, i32 0 + br label %for.cond48 + +for.cond48: ; preds = %for.cond48.backedge, %entry + br i1 false, label %cleanup87, label %if.else + +if.else: ; preds = %for.cond48 + %spec.select1 = select i1 false, i32 %.5, i32 0 + 
%spec.select = select i1 false, i32 %.6, i32 %spec.select1 + br label %cleanup87 + +cleanup87: ; preds = %if.else, %for.cond48 + %cleanup.dest.slot.3 = phi i32 [ 0, %for.cond48 ], [ %spec.select, %if.else ] + switch i32 %cleanup.dest.slot.3, label %for.cond48.backedge [ + i32 0, label %for.cond48.backedge + i32 1, label %for.cond48.backedge + ] + +for.cond48.backedge: ; preds = %cleanup87, %cleanup87, %cleanup87 + br label %for.cond48 +} diff --git a/llvm/test/Transforms/InstCombine/freeze-phi.ll b/llvm/test/Transforms/InstCombine/freeze-phi.ll index cdc9a5e..62bb9dc3 100644 --- a/llvm/test/Transforms/InstCombine/freeze-phi.ll +++ b/llvm/test/Transforms/InstCombine/freeze-phi.ll @@ -212,3 +212,31 @@ D: %y.fr = freeze i32 %y ret i32 %y.fr } + +; Make sure that fmf in phi node is dropped when freeze get folded. + +define float @pr161524(float noundef %arg) { +; CHECK-LABEL: @pr161524( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[COND:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[ARG:%.*]], i32 144) +; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN:%.*]], label [[IF_EXIT:%.*]] +; CHECK: if.then: +; CHECK-NEXT: [[FADD:%.*]] = fadd float [[ARG]], 1.000000e+00 +; CHECK-NEXT: br label [[IF_EXIT]] +; CHECK: if.exit: +; CHECK-NEXT: [[RET:%.*]] = phi float [ [[FADD]], [[IF_THEN]] ], [ [[ARG]], [[ENTRY:%.*]] ] +; CHECK-NEXT: ret float [[RET]] +; +entry: + %cond = tail call i1 @llvm.is.fpclass.f32(float %arg, i32 144) + br i1 %cond, label %if.then, label %if.exit + +if.then: + %fadd = fadd float %arg, 1.0 + br label %if.exit + +if.exit: + %ret = phi ninf float [ %fadd, %if.then ], [ %arg, %entry ] + %ret.fr = freeze float %ret + ret float %ret.fr +} diff --git a/llvm/test/Transforms/InstCombine/freeze.ll b/llvm/test/Transforms/InstCombine/freeze.ll index af5cb0c..ac7d65c 100644 --- a/llvm/test/Transforms/InstCombine/freeze.ll +++ b/llvm/test/Transforms/InstCombine/freeze.ll @@ -1464,6 +1464,27 @@ define ptr @freeze_ptrmask_nonnull(ptr %p, i64 noundef %m) { ret ptr %fr } +define i64 
@pr161492_1(i1 %cond) { +; CHECK-LABEL: define i64 @pr161492_1( +; CHECK-SAME: i1 [[COND:%.*]]) { +; CHECK-NEXT: ret i64 0 +; + %fr1 = freeze i64 poison + %fr2 = freeze i64 poison + %ret = select i1 %cond, i64 %fr1, i64 %fr2 + ret i64 %ret +} + +define i64 @pr161492_2(i1 %cond) { +; CHECK-LABEL: define i64 @pr161492_2( +; CHECK-SAME: i1 [[COND:%.*]]) { +; CHECK-NEXT: ret i64 0 +; + %fr = freeze i64 poison + %ret = select i1 %cond, i64 %fr, i64 %fr + ret i64 %ret +} + !0 = !{} !1 = !{i64 4} !2 = !{i32 0, i32 100} diff --git a/llvm/test/Transforms/InstCombine/funnel.ll b/llvm/test/Transforms/InstCombine/funnel.ll index 0e5f046..e573108 100644 --- a/llvm/test/Transforms/InstCombine/funnel.ll +++ b/llvm/test/Transforms/InstCombine/funnel.ll @@ -635,3 +635,29 @@ define i32 @test_rotl_and_neg_wrong_mask(i32 %x, i32 %shamt) { %or = or i32 %shl, %shr ret i32 %or } + +declare void @use(i16) + +; Make sure the reused result does not produce poison. + +define i16 @fshl_concat_vector_may_produce_poison(i4 %x, i12 %y) { +; CHECK-LABEL: @fshl_concat_vector_may_produce_poison( +; CHECK-NEXT: [[X_FR:%.*]] = freeze i4 [[X:%.*]] +; CHECK-NEXT: [[ZEXT_X:%.*]] = zext i4 [[X_FR]] to i16 +; CHECK-NEXT: [[SLX:%.*]] = shl nuw i16 [[ZEXT_X]], 12 +; CHECK-NEXT: [[ZEXT_Y:%.*]] = zext i12 [[Y:%.*]] to i16 +; CHECK-NEXT: [[XY:%.*]] = or disjoint i16 [[SLX]], [[ZEXT_Y]] +; CHECK-NEXT: call void @use(i16 [[XY]]) +; CHECK-NEXT: [[YX:%.*]] = call i16 @llvm.fshl.i16(i16 [[XY]], i16 [[XY]], i16 4) +; CHECK-NEXT: ret i16 [[YX]] +; + %x.fr = freeze i4 %x + %zext.x = zext i4 %x.fr to i16 + %slx = shl nuw nsw i16 %zext.x, 12 + %zext.y = zext i12 %y to i16 + %xy = or disjoint i16 %slx, %zext.y + call void @use(i16 %xy) + %sly = shl nuw i16 %zext.y, 4 + %yx = or disjoint i16 %sly, %zext.x + ret i16 %yx +} diff --git a/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check-dl.ll b/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check-dl.ll new file mode 100644 index 0000000..14a4c95 --- /dev/null +++ 
b/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check-dl.ll @@ -0,0 +1,50 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 6 +; RUN: opt -passes=loop-idiom -S %s | FileCheck %s + +target datalayout = "p:16:16" + +;. +; CHECK: @.crctable = private constant [256 x i32] zeroinitializer +;. +define void @test_with_dl() { +; CHECK-LABEL: define void @test_with_dl() { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[PH:.*]] +; CHECK: [[PH_LOOPEXIT:.*]]: +; CHECK-NEXT: [[CRC_NEXT_LCSSA:%.*]] = phi i32 [ [[CRC_NEXT3:%.*]], %[[LOOP:.*]] ] +; CHECK-NEXT: br label %[[PH]] +; CHECK: [[PH]]: +; CHECK-NEXT: [[CRC_USE:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[CRC_NEXT_LCSSA]], %[[PH_LOOPEXIT]] ] +; CHECK-NEXT: br label %[[LOOP]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[CRC2:%.*]] = phi i32 [ 0, %[[PH]] ], [ [[CRC_NEXT3]], %[[LOOP]] ] +; CHECK-NEXT: [[INDEXER_LO:%.*]] = trunc i32 [[CRC2]] to i8 +; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_LO]] to i16 +; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i32, ptr @.crctable, i16 [[INDEXER_EXT]] +; CHECK-NEXT: [[TBL_LD:%.*]] = load i32, ptr [[TBL_PTRADD]], align 4 +; CHECK-NEXT: [[CRC_LE_SHIFT:%.*]] = lshr i32 [[CRC2]], 8 +; CHECK-NEXT: [[CRC_NEXT3]] = xor i32 [[CRC_LE_SHIFT]], [[TBL_LD]] +; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 +; CHECK-NEXT: [[EXIT_COND1:%.*]] = icmp ne i16 [[IV]], 0 +; CHECK-NEXT: br i1 [[EXIT_COND1]], label %[[LOOP]], label %[[PH_LOOPEXIT]] +; +entry: + br label %ph + +ph: + %crc.use = phi i32 [ 0, %entry ], [ %crc.next, %loop ] + br label %loop + +loop: + %iv = phi i16 [ 0, %ph ], [ %iv.next, %loop ] + %crc = phi i32 [ 0, %ph ], [ %crc.next, %loop ] + %lshr.crc.1 = lshr i32 %crc, 1 + %crc.and.1 = and i32 %crc, 1 + %sb.check = icmp eq i32 %crc.and.1, 0 + %xor = xor i32 %lshr.crc.1, 0 + %crc.next = select i1 %sb.check, i32 %lshr.crc.1, i32 
%xor + %iv.next = add i16 %iv, 1 + %exit.cond = icmp ult i16 %iv, 7 + br i1 %exit.cond, label %loop, label %ph +} diff --git a/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll b/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll index 51dc142..b2ec53c 100644 --- a/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll +++ b/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll @@ -118,8 +118,8 @@ define i16 @crc16.le.tc16(i16 %msg, i16 %checksum) { ; CHECK-NEXT: [[IV_INDEXER:%.*]] = zext i8 [[IV_BITS]] to i16 ; CHECK-NEXT: [[DATA_INDEXER:%.*]] = lshr i16 [[MSG]], [[IV_INDEXER]] ; CHECK-NEXT: [[CRC_DATA_INDEXER:%.*]] = xor i16 [[DATA_INDEXER]], [[CRC2]] -; CHECK-NEXT: [[INDEXER_LO:%.*]] = and i16 [[CRC_DATA_INDEXER]], 255 -; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_LO]] to i64 +; CHECK-NEXT: [[INDEXER_LO:%.*]] = trunc i16 [[CRC_DATA_INDEXER]] to i8 +; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_LO]] to i64 ; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i16, ptr @.crctable.2, i64 [[INDEXER_EXT]] ; CHECK-NEXT: [[TBL_LD:%.*]] = load i16, ptr [[TBL_PTRADD]], align 2 ; CHECK-NEXT: [[CRC_LE_SHIFT:%.*]] = lshr i16 [[CRC2]], 8 @@ -166,8 +166,8 @@ define i8 @crc8.le.tc16(i16 %msg, i8 %checksum) { ; CHECK-NEXT: [[DATA_INDEXER:%.*]] = lshr i16 [[MSG]], [[IV_INDEXER]] ; CHECK-NEXT: [[CRC_INDEXER_CAST:%.*]] = zext i8 [[CRC2]] to i16 ; CHECK-NEXT: [[CRC_DATA_INDEXER:%.*]] = xor i16 [[DATA_INDEXER]], [[CRC_INDEXER_CAST]] -; CHECK-NEXT: [[INDEXER_LO:%.*]] = and i16 [[CRC_DATA_INDEXER]], 255 -; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_LO]] to i64 +; CHECK-NEXT: [[INDEXER_LO:%.*]] = trunc i16 [[CRC_DATA_INDEXER]] to i8 +; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_LO]] to i64 ; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i8, ptr @.crctable.3, i64 [[INDEXER_EXT]] ; CHECK-NEXT: [[TBL_LD]] = load i8, ptr [[TBL_PTRADD]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i8 [[IV]], 1 @@ -212,8 +212,8 @@ 
define i16 @crc16.be.tc8.crc.init.li(i16 %checksum, i8 %msg) { ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[CRC2:%.*]] = phi i16 [ [[CRC_INIT]], %[[ENTRY]] ], [ [[CRC_NEXT3:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[INDEXER_HI:%.*]] = lshr i16 [[CRC2]], 8 -; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = and i16 [[INDEXER_HI]], 255 -; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_HI_LO_BYTE]] to i64 +; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = trunc i16 [[INDEXER_HI]] to i8 +; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_HI_LO_BYTE]] to i64 ; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i16, ptr @.crctable.4, i64 [[INDEXER_EXT]] ; CHECK-NEXT: [[TBL_LD:%.*]] = load i16, ptr [[TBL_PTRADD]], align 2 ; CHECK-NEXT: [[CRC_BE_SHIFT:%.*]] = shl i16 [[CRC2]], 8 @@ -255,8 +255,8 @@ define i16 @crc16.be.tc8.crc.init.arg(i16 %crc.init) { ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[CRC2:%.*]] = phi i16 [ [[CRC_INIT]], %[[ENTRY]] ], [ [[CRC_NEXT3:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[INDEXER_HI:%.*]] = lshr i16 [[CRC2]], 8 -; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = and i16 [[INDEXER_HI]], 255 -; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_HI_LO_BYTE]] to i64 +; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = trunc i16 [[INDEXER_HI]] to i8 +; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_HI_LO_BYTE]] to i64 ; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i16, ptr @.crctable.5, i64 [[INDEXER_EXT]] ; CHECK-NEXT: [[TBL_LD:%.*]] = load i16, ptr [[TBL_PTRADD]], align 2 ; CHECK-NEXT: [[CRC_BE_SHIFT:%.*]] = shl i16 [[CRC2]], 8 @@ -295,8 +295,8 @@ define i16 @crc16.be.tc8.crc.init.arg.flipped.sb.check(i16 %crc.init) { ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[CRC2:%.*]] = phi i16 [ [[CRC_INIT]], %[[ENTRY]] ], [ [[CRC_NEXT3:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[INDEXER_HI:%.*]] = lshr i16 
[[CRC2]], 8 -; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = and i16 [[INDEXER_HI]], 255 -; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_HI_LO_BYTE]] to i64 +; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = trunc i16 [[INDEXER_HI]] to i8 +; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_HI_LO_BYTE]] to i64 ; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i16, ptr @.crctable.6, i64 [[INDEXER_EXT]] ; CHECK-NEXT: [[TBL_LD:%.*]] = load i16, ptr [[TBL_PTRADD]], align 2 ; CHECK-NEXT: [[CRC_BE_SHIFT:%.*]] = shl i16 [[CRC2]], 8 @@ -406,8 +406,8 @@ define i32 @crc32.le.tc8.data32(i32 %checksum, i32 %msg) { ; CHECK-NEXT: [[IV_INDEXER:%.*]] = zext i8 [[IV_BITS]] to i32 ; CHECK-NEXT: [[DATA_INDEXER:%.*]] = lshr i32 [[MSG]], [[IV_INDEXER]] ; CHECK-NEXT: [[CRC_DATA_INDEXER:%.*]] = xor i32 [[DATA_INDEXER]], [[CRC2]] -; CHECK-NEXT: [[INDEXER_LO:%.*]] = and i32 [[CRC_DATA_INDEXER]], 255 -; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i32 [[INDEXER_LO]] to i64 +; CHECK-NEXT: [[INDEXER_LO:%.*]] = trunc i32 [[CRC_DATA_INDEXER]] to i8 +; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_LO]] to i64 ; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i32, ptr @.crctable.8, i64 [[INDEXER_EXT]] ; CHECK-NEXT: [[TBL_LD:%.*]] = load i32, ptr [[TBL_PTRADD]], align 4 ; CHECK-NEXT: [[CRC_LE_SHIFT:%.*]] = lshr i32 [[CRC2]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll index 387bb43..2391842 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll @@ -81,17 +81,6 @@ define void @powi_call(ptr %P) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr 
inbounds double, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load double, ptr [[GEP]], align 8 -; CHECK-NEXT: [[POWI:%.*]] = tail call double @llvm.powi.f64.i32(double [[L]], i32 3) -; CHECK-NEXT: store double [[POWI]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll index 56a4683..6e3d257 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll @@ -33,20 +33,7 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,1 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[DST]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3 -; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP19]] -; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[SHR3]] to i8 -; CHECK-NEXT: store i8 [[CONV4]], ptr [[P_OUT_TAIL_09]], align 1 -; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[P_OUT_TAIL_09]], i64 1 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; 
CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; @@ -108,20 +95,7 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[DST]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3 -; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP19]] -; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[SHR3]] to i8 -; CHECK-NEXT: store i8 [[CONV4]], ptr [[P_OUT_TAIL_09]], align 1 -; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[P_OUT_TAIL_09]], i64 1 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll index e4ee677..6cf11be 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll @@ -362,8 +362,9 @@ define void @latch_branch_cost(ptr %dst) { ; PRED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], 104 ; PRED-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop 
[[LOOP0:![0-9]+]] ; PRED: [[MIDDLE_BLOCK]]: -; PRED-NEXT: br [[EXIT:label %.*]] -; PRED: [[SCALAR_PH:.*:]] +; PRED-NEXT: br label %[[EXIT:.*]] +; PRED: [[EXIT]]: +; PRED-NEXT: ret void ; entry: br label %loop @@ -585,8 +586,9 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 { ; PRED-NEXT: [[TMP16:%.*]] = xor i1 [[TMP15]], true ; PRED-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; PRED: [[MIDDLE_BLOCK]]: -; PRED-NEXT: br [[EXIT:label %.*]] -; PRED: [[SCALAR_PH:.*:]] +; PRED-NEXT: br label %[[EXIT:.*]] +; PRED: [[EXIT]]: +; PRED-NEXT: ret void ; entry: br label %loop @@ -609,7 +611,6 @@ exit: } define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) { -; ; COMMON-LABEL: define void @low_trip_count_fold_tail_scalarized_store( ; COMMON-SAME: ptr [[DST:%.*]]) { ; COMMON-NEXT: [[ENTRY:.*:]] @@ -659,16 +660,16 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) { ; COMMON-NEXT: store i8 6, ptr [[TMP6]], align 1 ; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE12]] ; COMMON: [[PRED_STORE_CONTINUE12]]: -; COMMON-NEXT: br i1 false, label %[[PRED_STORE_IF13:.*]], label %[[EXIT:.*]] +; COMMON-NEXT: br i1 false, label %[[PRED_STORE_IF13:.*]], label %[[EXIT1:.*]] ; COMMON: [[PRED_STORE_IF13]]: ; COMMON-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[DST]], i64 7 ; COMMON-NEXT: store i8 7, ptr [[TMP7]], align 1 -; COMMON-NEXT: br label %[[EXIT]] -; COMMON: [[EXIT]]: -; COMMON-NEXT: br label %[[SCALAR_PH:.*]] -; COMMON: [[SCALAR_PH]]: -; COMMON-NEXT: br [[EXIT1:label %.*]] -; COMMON: [[SCALAR_PH1:.*:]] +; COMMON-NEXT: br label %[[EXIT1]] +; COMMON: [[EXIT1]]: +; COMMON-NEXT: br label %[[SCALAR_PH1:.*]] +; COMMON: [[SCALAR_PH1]]: +; COMMON-NEXT: br [[EXIT:label %.*]] +; COMMON: [[SCALAR_PH:.*:]] ; entry: br label %loop @@ -1160,8 +1161,9 @@ define void @redundant_branch_and_tail_folding(ptr %dst, i1 %c) { ; PRED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24 ; PRED-NEXT: br i1 
[[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; PRED: [[MIDDLE_BLOCK]]: -; PRED-NEXT: br [[EXIT:label %.*]] -; PRED: [[SCALAR_PH:.*:]] +; PRED-NEXT: br label %[[EXIT:.*]] +; PRED: [[EXIT]]: +; PRED-NEXT: ret void ; entry: br label %loop.header diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/drop-poison-generating-flags.ll b/llvm/test/Transforms/LoopVectorize/AArch64/drop-poison-generating-flags.ll index 1af55e9..71acac2 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/drop-poison-generating-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/drop-poison-generating-flags.ll @@ -65,36 +65,6 @@ define void @check_widen_intrinsic_with_nnan(ptr noalias %dst.0, ptr noalias %ds ; CHECK-NEXT: br i1 [[TMP34]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds double, ptr [[SRC_1]], i64 [[IV]] -; CHECK-NEXT: [[L_1:%.*]] = load double, ptr [[GEP_SRC_1]], align 8 -; CHECK-NEXT: [[ABS:%.*]] = tail call nnan double @llvm.fabs.f64(double [[L_1]]) -; CHECK-NEXT: [[C_0:%.*]] = fcmp olt double [[ABS]], 1.000000e+00 -; CHECK-NEXT: br i1 [[C_0]], label %[[THEN:.*]], label %[[ELSE:.*]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[L_2:%.*]] = load double, ptr [[SRC_2]], align 8 -; CHECK-NEXT: [[IV_SUB_1:%.*]] = add nsw i64 [[IV]], -1 -; CHECK-NEXT: [[GEP_IV_SUB_1:%.*]] = getelementptr double, ptr [[DST_0]], i64 [[IV_SUB_1]] -; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_IV_SUB_1]], align 8 -; CHECK-NEXT: [[C_1:%.*]] = fcmp oeq double [[L_2]], 0.000000e+00 -; CHECK-NEXT: br i1 [[C_1]], label %[[MERGE:.*]], label %[[LOOP_LATCH]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: [[IV_SUB_2:%.*]] = 
add nsw i64 [[IV]], -1 -; CHECK-NEXT: [[GEP_IV_SUB_2:%.*]] = getelementptr double, ptr [[DST_0]], i64 [[IV_SUB_2]] -; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_IV_SUB_2]], align 8 -; CHECK-NEXT: br label %[[MERGE]] -; CHECK: [[MERGE]]: -; CHECK-NEXT: [[MERGE_IV:%.*]] = phi i64 [ [[IV_SUB_2]], %[[ELSE]] ], [ [[IV_SUB_1]], %[[THEN]] ] -; CHECK-NEXT: [[GEP_DST_1:%.*]] = getelementptr inbounds i32, ptr [[DST_1]], i64 [[MERGE_IV]] -; CHECK-NEXT: store i32 10, ptr [[GEP_DST_1]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll index 890ff1d..4bb8a0e 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll @@ -69,20 +69,7 @@ define i32 @test_phi_iterator_invalidation(ptr %A, ptr noalias %B) { ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SEXT:%.*]] = sext i16 [[SCALAR_RECUR]] to i32 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV_NEXT]] -; CHECK-NEXT: [[FOR_NEXT]] = load 
i16, ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i32, ptr [[B]], i64 [[IV_NEXT]] -; CHECK-NEXT: store i32 [[SEXT]], ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1001 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret i32 0 ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll index db088f8..bfee39ea 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll @@ -18,21 +18,8 @@ define double @test_reduction_costs() { ; CHECK-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_1:.*]] -; CHECK: [[LOOP_1]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_1]] ] -; CHECK-NEXT: [[R_1:%.*]] = phi double [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[R_1_NEXT:%.*]], %[[LOOP_1]] ] -; CHECK-NEXT: [[R_2:%.*]] = phi double [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[R_2_NEXT:%.*]], %[[LOOP_1]] ] -; CHECK-NEXT: [[R_1_NEXT]] = fadd double [[R_1]], 3.000000e+00 -; CHECK-NEXT: [[R_2_NEXT]] = fadd double [[R_2]], 9.000000e+00 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_1]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[R_1_NEXT_LCSSA:%.*]] = phi double [ [[R_1_NEXT]], %[[LOOP_1]] ], [ [[TMP0]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[R_2_NEXT_LCSSA:%.*]] = phi double [ [[R_2_NEXT]], %[[LOOP_1]] ], [ [[TMP1]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[DIV:%.*]] = fmul double [[R_1_NEXT_LCSSA]], [[R_2_NEXT_LCSSA]] +; CHECK-NEXT: [[DIV:%.*]] = fmul double [[TMP0]], [[TMP1]] ; CHECK-NEXT: ret 
double [[DIV]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll index a74c33f..42a1940 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll @@ -169,22 +169,9 @@ define i64 @int_and_pointer_iv(ptr %start, i32 %N) { ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[TMP5]], i32 2 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[RECUR_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[PTR_IV]], align 4 -; CHECK-NEXT: [[RECUR_NEXT]] = zext i32 [[L]] to i64 -; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 4 -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[RECUR_LCSSA:%.*]] = phi i64 [ [[SCALAR_RECUR]], [[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RECUR_LCSSA]] +; CHECK-NEXT: ret i64 [[VECTOR_RECUR_EXTRACT_FOR_PHI]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/invariant-replicate-region.ll b/llvm/test/Transforms/LoopVectorize/AArch64/invariant-replicate-region.ll index f1571e6..d80fdd1 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/invariant-replicate-region.ll +++ 
b/llvm/test/Transforms/LoopVectorize/AArch64/invariant-replicate-region.ll @@ -51,22 +51,8 @@ define i32 @test_invariant_replicate_region(i32 %x, i1 %c) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i32> [[PREDPHI]], i32 3 ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[REM_1:%.*]] = urem i32 10, [[X]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[RES:%.*]] = phi i32 [ 0, %[[LOOP_HEADER]] ], [ [[REM_1]], %[[THEN]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 99 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES_LCSSA:%.*]] = phi i32 [ [[RES]], %[[LOOP_LATCH]] ], [ [[TMP17]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RES_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP17]] ; entry: br label %loop.header diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll index dd8bd27..e424649 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll @@ -474,19 +474,8 @@ define i32 @tc4(ptr noundef readonly captures(none) %tmp) vscale_range(1,16) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: 
[[SUM_0179:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD]] = add i32 [[SUM_0179]], [[TMP5]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4 ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP4]] ; entry: br label %for.body @@ -520,6 +509,7 @@ define i32 @tc4_from_profile(ptr noundef readonly captures(none) %tmp, i64 %N) v ; CHECK-NEXT: [[ADD]] = add i32 [[SUM_0179]], [[TMP0]] ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]], !prof [[PROF9:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[ADD_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll b/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll index 80bf956..9f518e4 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll @@ -62,18 +62,8 @@ define i32 @add_reduction_select_operand_constant_but_non_uniform() { ; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[ADD2_REASS:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RDX:%.*]] 
= phi i32 [ 42, %[[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[ADD2_REASS]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[RDX_NEXT]] = add i32 0, [[RDX]] -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD2_REASS]], 64 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[RDX_NEXT]], %[[LOOP]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP3]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll index 544ef5c..a6e0f8a 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll @@ -32,14 +32,7 @@ define void @sincos_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noali ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1 ; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4 ; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 -; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 ; CHECK: [[EXIT:.*:]] ; ; CHECK-ARMPL-LABEL: define void @sincos_f32( @@ -112,14 +105,7 @@ define void @sincos_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noali ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1 ; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8 ; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8 -; 
CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincos.f64(double [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1 -; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8 -; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8 ; CHECK: [[EXIT:.*:]] ; ; CHECK-ARMPL-LABEL: define void @sincos_f64( @@ -209,15 +195,6 @@ define void @predicated_sincos(float %x, ptr noalias %in, ptr noalias writeonly ; CHECK-ARMPL: [[TMP17:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP15]], 1 ; CHECK-ARMPL: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP16]], ptr [[TMP19:%.*]], i32 4, <vscale x 4 x i1> [[TMP14:%.*]]) ; CHECK-ARMPL: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP17]], ptr [[TMP21:%.*]], i32 4, <vscale x 4 x i1> [[TMP14]]) -; CHECK-ARMPL: [[MIDDLE_BLOCK:.*:]] -; CHECK-ARMPL: [[SCALAR_PH:.*:]] -; CHECK-ARMPL: [[FOR_BODY:.*:]] -; CHECK-ARMPL: [[IF_THEN:.*:]] -; CHECK-ARMPL: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]]) -; CHECK-ARMPL: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK-ARMPL: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK-ARMPL: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 -; CHECK-ARMPL: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 ; CHECK-ARMPL: [[IF_MERGE:.*:]] ; CHECK-ARMPL: [[FOR_END:.*:]] ; @@ -277,14 +254,7 @@ define void @modf_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1 ; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4 ; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; 
CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.modf.f32(float [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 -; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 ; CHECK: [[EXIT:.*:]] ; ; CHECK-ARMPL-LABEL: define void @modf_f32( @@ -357,14 +327,7 @@ define void @modf_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1 ; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8 ; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.modf.f64(double [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1 -; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8 -; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8 ; CHECK: [[EXIT:.*:]] ; ; CHECK-ARMPL-LABEL: define void @modf_f64( @@ -441,14 +404,7 @@ define void @sincospi_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noa ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1 ; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4 ; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincospi.f32(float [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 
-; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 ; CHECK: [[EXIT:.*:]] ; ; CHECK-ARMPL-LABEL: define void @sincospi_f32( @@ -521,14 +477,7 @@ define void @sincospi_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noa ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1 ; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8 ; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincospi.f64(double [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1 -; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8 -; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8 ; CHECK: [[EXIT:.*:]] ; ; CHECK-ARMPL-LABEL: define void @sincospi_f64( diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll index ff3f6e9..56ace54 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll @@ -30,17 +30,6 @@ define void @always_vectorize(ptr %p, i32 %x) { ; DEFAULT-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]] -; DEFAULT-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: 
[[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -59,17 +48,6 @@ define void @always_vectorize(ptr %p, i32 %x) { ; OPTSIZE-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; OPTSIZE: [[MIDDLE_BLOCK]]: ; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; OPTSIZE: [[SCALAR_PH:.*]]: -; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] -; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]] -; OPTSIZE-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4 -; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; OPTSIZE: [[FOR_COND_CLEANUP]]: ; OPTSIZE-NEXT: ret void ; @@ -88,17 +66,6 @@ define void @always_vectorize(ptr %p, i32 %x) { ; MINSIZE-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; MINSIZE: [[MIDDLE_BLOCK]]: ; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MINSIZE: [[SCALAR_PH:.*]]: -; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] -; MINSIZE: [[FOR_BODY]]: -; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]] -; MINSIZE-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw 
nsw i64 [[INDVARS_IV]], 1 -; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4 -; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; MINSIZE: [[FOR_COND_CLEANUP]]: ; MINSIZE-NEXT: ret void ; @@ -390,23 +357,6 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n) ; DEFAULT-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[TMP72:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i8 -; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP72]] -; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP72]], 1 -; DEFAULT-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]] -; DEFAULT-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]] -; DEFAULT-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP72]], 2 -; DEFAULT-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]] -; DEFAULT-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDVARS_IV]] -; DEFAULT-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -531,23 +481,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; DEFAULT-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; 
DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8 -; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]] -; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1 -; DEFAULT-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]] -; DEFAULT-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]] -; DEFAULT-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP26]], 2 -; DEFAULT-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]] -; DEFAULT-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[IV]] -; DEFAULT-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1 -; DEFAULT-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 15 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -598,23 +531,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; OPTSIZE-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; OPTSIZE: [[MIDDLE_BLOCK]]: ; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; OPTSIZE: [[SCALAR_PH:.*]]: -; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] -; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; OPTSIZE-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8 -; OPTSIZE-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]] -; OPTSIZE-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1 -; OPTSIZE-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]] -; OPTSIZE-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]] -; OPTSIZE-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP26]], 2 -; OPTSIZE-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]] -; OPTSIZE-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]] -; OPTSIZE-NEXT: 
[[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[IV]] -; OPTSIZE-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1 -; OPTSIZE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 15 -; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; OPTSIZE: [[FOR_COND_CLEANUP]]: ; OPTSIZE-NEXT: ret void ; @@ -665,23 +581,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; MINSIZE-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; MINSIZE: [[MIDDLE_BLOCK]]: ; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MINSIZE: [[SCALAR_PH:.*]]: -; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] -; MINSIZE: [[FOR_BODY]]: -; MINSIZE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MINSIZE-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8 -; MINSIZE-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]] -; MINSIZE-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1 -; MINSIZE-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]] -; MINSIZE-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]] -; MINSIZE-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP26]], 2 -; MINSIZE-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]] -; MINSIZE-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]] -; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[IV]] -; MINSIZE-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1 -; MINSIZE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 15 -; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; MINSIZE: [[FOR_COND_CLEANUP]]: ; MINSIZE-NEXT: ret void ; @@ -746,23 +645,6 @@ define void @dont_vectorize_with_minsize() { ; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label 
%[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; DEFAULT-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; DEFAULT-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; DEFAULT-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -789,23 +671,6 @@ define void @dont_vectorize_with_minsize() { ; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; OPTSIZE: [[MIDDLE_BLOCK]]: ; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; OPTSIZE: [[SCALAR_PH:.*]]: -; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] -; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: 
[[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; OPTSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; OPTSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; OPTSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; OPTSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; OPTSIZE: [[FOR_COND_CLEANUP]]: ; OPTSIZE-NEXT: ret void ; @@ -832,23 +697,6 @@ define void @dont_vectorize_with_minsize() { ; MINSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; MINSIZE: [[MIDDLE_BLOCK]]: ; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MINSIZE: [[SCALAR_PH:.*]]: -; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] -; MINSIZE: [[FOR_BODY]]: -; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; MINSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; MINSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[TRUNC:%.*]] = trunc 
i32 [[MUL]] to i16 -; MINSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; MINSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; MINSIZE: [[FOR_COND_CLEANUP]]: ; MINSIZE-NEXT: ret void ; @@ -913,23 +761,6 @@ define void @vectorization_forced_minsize_reduce_width() { ; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; DEFAULT-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; DEFAULT-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; DEFAULT-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; DEFAULT: 
[[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -956,23 +787,6 @@ define void @vectorization_forced_minsize_reduce_width() { ; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; OPTSIZE: [[MIDDLE_BLOCK]]: ; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; OPTSIZE: [[SCALAR_PH:.*]]: -; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] -; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; OPTSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; OPTSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; OPTSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; OPTSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; OPTSIZE: [[FOR_COND_CLEANUP]]: ; OPTSIZE-NEXT: ret void ; @@ -999,23 +813,6 @@ define void @vectorization_forced_minsize_reduce_width() { ; MINSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; MINSIZE: [[MIDDLE_BLOCK]]: ; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MINSIZE: [[SCALAR_PH:.*]]: -; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] -; 
MINSIZE: [[FOR_BODY]]: -; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; MINSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; MINSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; MINSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; MINSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; MINSIZE: [[FOR_COND_CLEANUP]]: ; MINSIZE-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll index 24375dd..dd239c0 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll @@ -28,7 +28,8 @@ define i32 @dotp(ptr %a, ptr %b) #0 { ; CHECK: middle.block: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: for.exit: +; CHECK-NEXT: ret i32 [[TMP11]] ; entry: br label %for.body @@ -80,7 +81,7 @@ define void 
@dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 { ; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[IV_NEXT]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[IV_NEXT]] @@ -111,7 +112,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 { ; CHECK-NEXT: [[TMP13]] = add <4 x i32> [[TMP14]], [[VEC_PHI9]] ; CHECK-NEXT: [[INDEX_NEXT14]] = add nuw i64 [[INDEX9]], 4 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT14]], [[N_VEC5]] -; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP13]]) ; CHECK-NEXT: [[CMP_N15:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC5]] @@ -135,7 +136,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 { ; CHECK-NEXT: [[CMP_IV_NEG:%.*]] = icmp ugt i64 [[IV_NEG]], 0 ; CHECK-NEXT: [[CMP_IV:%.*]] = icmp ne i64 [[ACCUM1]], -1 ; CHECK-NEXT: [[EXITCOND:%.*]] = and i1 [[CMP_IV_NEG]], [[CMP_IV]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[WHILE_BODY1]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[WHILE_BODY1]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: while.end.loopexit: ; 
CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ [[ADD]], [[WHILE_BODY1]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[TMP15]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret void @@ -494,11 +495,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) { ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16) ; CHECK-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret i32 [[TMP182]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll index 43fccdc..49e9989 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll @@ -261,7 +261,8 @@ define i32 @sudot_neon(ptr %a, ptr %b) #1 { ; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] ; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: for.exit: +; CHECK-NEXT: ret i32 [[TMP13]] ; ; CHECK-NOI8MM-LABEL: define i32 @sudot_neon( ; CHECK-NOI8MM-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR1:[0-9]+]] { @@ -296,7 +297,8 @@ define i32 @sudot_neon(ptr %a, ptr %b) #1 { ; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP13]], [[TMP12]] ; CHECK-NOI8MM-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> 
[[BIN_RDX]]) ; CHECK-NOI8MM-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-NOI8MM: scalar.ph: +; CHECK-NOI8MM: for.exit: +; CHECK-NOI8MM-NEXT: ret i32 [[TMP15]] ; entry: br label %for.body @@ -349,12 +351,13 @@ define i32 @usdot_neon(ptr %a, ptr %b) #1 { ; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] ; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: for.exit: +; CHECK-NEXT: ret i32 [[TMP13]] ; ; CHECK-NOI8MM-LABEL: define i32 @usdot_neon( ; CHECK-NOI8MM-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR1]] { @@ -384,12 +387,13 @@ define i32 @usdot_neon(ptr %a, ptr %b) #1 { ; CHECK-NOI8MM-NEXT: [[TMP13]] = add <16 x i32> [[TMP11]], [[VEC_PHI1]] ; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NOI8MM-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-NOI8MM-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NOI8MM-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK-NOI8MM: middle.block: ; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP13]], [[TMP12]] ; CHECK-NOI8MM-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) ; CHECK-NOI8MM-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-NOI8MM: scalar.ph: +; CHECK-NOI8MM: for.exit: +; 
CHECK-NOI8MM-NEXT: ret i32 [[TMP15]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll index 410993b..801eb81 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll @@ -30,7 +30,8 @@ define i32 @dotp(ptr %a, ptr %b) { ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP9]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @dotp( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { @@ -65,7 +66,8 @@ define i32 @dotp(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP14]] ; ; CHECK-MAXBW-LABEL: define i32 @dotp( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { @@ -90,7 +92,8 @@ define i32 @dotp(ptr %a, ptr %b) { ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: for.exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP9]] ; entry: br label %for.body @@ -196,11 +199,12 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP69]] = add <16 x i32> [[TMP68]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw 
i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP70:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP69]]) ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP71]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_different_types( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -354,12 +358,13 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP138]] = add <16 x i32> [[TMP136]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP139:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP139]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP139]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP138]], [[TMP137]] ; CHECK-INTERLEAVED-NEXT: [[TMP140:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP140]] ; ; CHECK-MAXBW-LABEL: define i32 @not_dotp_different_types( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -442,11 +447,12 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP69]] = add <16 x i32> [[TMP68]], [[VEC_PHI]] ; 
CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP70:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-MAXBW-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP69]]) ; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: for.exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP71]] ; entry: br label %for.body @@ -491,11 +497,12 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = add <16 x i32> [[TMP7]], [[TMP8]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP9]], i32 15 ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP11]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_not_loop_carried( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -517,11 +524,12 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = add <16 x i32> [[TMP7]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP9]], i32 15 ; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP11]] ; ; CHECK-MAXBW-LABEL: define i32 @not_dotp_not_loop_carried( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -543,11 +551,12 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = add <16 x i32> [[TMP7]], [[TMP8]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP9]], i32 15 ; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: for.exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP11]] ; entry: br label %for.body @@ -594,11 +603,12 @@ define i32 @not_dotp_not_phi(ptr %a, ptr noalias %b, ptr noalias %c) { ; CHECK-INTERLEAVE1-NEXT: store <16 x i32> [[TMP8]], ptr [[TMP13]], align 4 ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] 
= extractelement <16 x i32> [[TMP7]], i32 15 ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP12]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_not_phi( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] { @@ -622,11 +632,12 @@ define i32 @not_dotp_not_phi(ptr %a, ptr noalias %b, ptr noalias %c) { ; CHECK-INTERLEAVED-NEXT: store <16 x i32> [[TMP8]], ptr [[TMP13]], align 4 ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = extractelement <16 x i32> [[TMP7]], i32 15 ; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP12]] ; ; CHECK-MAXBW-LABEL: define i32 @not_dotp_not_phi( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] { @@ -650,11 +661,12 @@ define i32 @not_dotp_not_phi(ptr %a, ptr noalias %b, ptr noalias %c) { ; CHECK-MAXBW-NEXT: store <16 x i32> [[TMP8]], ptr [[TMP13]], align 4 ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = extractelement <16 x i32> [[TMP7]], i32 15 ; 
CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: for.exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP12]] ; entry: br label %for.body @@ -733,7 +745,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP31]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP33:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE13]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP34:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE10]]) @@ -831,7 +843,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP50]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP51:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP51]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP51]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE10]], [[PARTIAL_REDUCE13]] ; CHECK-INTERLEAVED-NEXT: [[TMP52:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) @@ -897,7 +909,7 @@ define i32 
@dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP31]]) ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP33:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE13]]) ; CHECK-MAXBW-NEXT: [[TMP34:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE10]]) @@ -1292,11 +1304,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16) ; CHECK-INTERLEAVE1-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: br label [[EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP182]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @dotp_predicated( ; CHECK-INTERLEAVED-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -1627,11 +1640,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 ; 
CHECK-INTERLEAVED-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16) ; CHECK-INTERLEAVED-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVED-NEXT: br label [[EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP182]] ; ; CHECK-MAXBW-LABEL: define i32 @dotp_predicated( ; CHECK-MAXBW-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -1962,11 +1976,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16) ; CHECK-MAXBW-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-MAXBW-NEXT: br label [[EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP182]] ; entry: br label %for.body @@ -2010,12 +2025,14 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP8]] = add <16 x i32> [[TMP7]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br 
i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP6]], i32 15 ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: [[RESULT:%.*]] = add i32 [[TMP10]], [[TMP11]] +; CHECK-INTERLEAVE1-NEXT: ret i32 [[RESULT]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_extend_user( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -2045,13 +2062,15 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP14]] = add <16 x i32> [[TMP12]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP14]], [[TMP13]] ; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = extractelement <16 x i32> [[TMP10]], i32 15 ; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: [[RESULT:%.*]] = add i32 [[TMP16]], [[TMP17]] +; CHECK-INTERLEAVED-NEXT: ret i32 [[RESULT]] ; ; CHECK-MAXBW-LABEL: define i32 @not_dotp_extend_user( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr 
[[B:%.*]]) #[[ATTR0]] { @@ -2072,12 +2091,14 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP8]] = add <16 x i32> [[TMP7]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]]) ; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP6]], i32 15 ; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: for.exit: +; CHECK-MAXBW-NEXT: [[RESULT:%.*]] = add i32 [[TMP10]], [[TMP11]] +; CHECK-MAXBW-NEXT: ret i32 [[RESULT]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll index 09917fc..6e11e55 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll @@ -501,7 +501,8 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP69]]) ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP71]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_different_types( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -660,7 +661,8 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP138]], [[TMP137]] ; 
CHECK-INTERLEAVED-NEXT: [[TMP142:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP142]] ; ; CHECK-MAXBW-LABEL: define i32 @not_dotp_different_types( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -747,7 +749,8 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 { ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP138]]) ; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: for.exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP71]] ; entry: br label %for.body @@ -800,7 +803,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = add <vscale x 8 x i32> [[TMP16]], [[TMP17]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = mul nuw i32 [[TMP20]], 8 @@ -848,7 +851,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = add <vscale x 8 x i32> [[TMP25]], [[TMP26]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = mul nuw i32 [[TMP29]], 8 @@ -890,7 +893,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[TMP27:%.*]] = add <vscale x 8 x i32> [[TMP25]], [[TMP26]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = mul nuw i32 [[TMP20]], 8 @@ -949,7 +952,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = add <vscale x 8 x i32> [[TMP16]], [[TMP15]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-INTERLEAVE1-NEXT: [[TMP24:%.*]] = mul nuw i32 [[TMP23]], 8 @@ -987,7 +990,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = add <vscale x 8 x i32> [[TMP30]], [[TMP22]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; 
CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = mul nuw i32 [[TMP27]], 8 @@ -1019,7 +1022,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = add <vscale x 8 x i32> [[TMP20]], [[TMP19]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-MAXBW-NEXT: [[TMP24:%.*]] = mul nuw i32 [[TMP23]], 8 @@ -1108,7 +1111,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP41]] = add <vscale x 4 x i32> [[TMP40]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]] ; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP41]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP35]]) @@ -1226,7 +1229,7 @@ define i32 
@dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP81]] = add <vscale x 4 x i32> [[TMP79]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]] ; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[TMP81]], [[TMP80]] ; CHECK-INTERLEAVED-NEXT: [[TMP83:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]]) @@ -1296,7 +1299,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE16]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI4]], <vscale x 8 x i32> [[TMP73]]) ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP74:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP74]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP74]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE16]]) ; CHECK-MAXBW-NEXT: [[TMP40:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE17]]) @@ -1393,11 +1396,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = extractelement 
<vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = xor i1 [[TMP20]], true -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]]) ; CHECK-INTERLEAVE1-NEXT: br label [[EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP22]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @dotp_predicated( ; CHECK-INTERLEAVED-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -1430,11 +1434,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = xor i1 [[TMP20]], true -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]]) ; CHECK-INTERLEAVED-NEXT: br label [[EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP22]] ; ; CHECK-MAXBW-LABEL: define i32 @dotp_predicated( ; CHECK-MAXBW-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -1467,11 +1472,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: 
[[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = xor i1 [[TMP19]], true -; CHECK-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-MAXBW-NEXT: br label [[EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP21]] ; entry: br label %for.body @@ -1519,7 +1525,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP14]] = add <vscale x 4 x i32> [[TMP13]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] ; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP14]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() @@ -1566,7 +1572,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 4 x i32> [[TMP22]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], 
label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[TMP24]], [[TMP23]] ; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]]) @@ -1601,7 +1607,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[TMP24]] = add <vscale x 8 x i32> [[TMP22]], [[VEC_PHI1]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[TMP24]]) ; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() @@ -1660,7 +1666,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 2 x i64> [[VEC_PHI]], [[TMP14]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP15]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]] @@ -1707,7 +1713,7 @@ define i64 @dotp_cost_disagreement(ptr %a, 
ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 2 x i64> [[VEC_PHI1]], [[TMP23]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i64> [[TMP25]], [[TMP24]] ; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[BIN_RDX]]) @@ -1742,7 +1748,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[TMP14]] = add <vscale x 8 x i64> [[VEC_PHI]], [[TMP13]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP14]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]] @@ -1860,7 +1866,7 @@ define void @not_dotp_not_phi2(ptr %matrix, i32 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP23]] = add i32 [[TMP21]], [[TMP15]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP20:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP23]], [[TMP22]] ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] @@ -1972,7 +1978,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <8 x i64> [[TMP4]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2010,7 +2016,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP9]] = add <8 x i64> [[TMP7]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <8 x i64> [[TMP9]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[BIN_RDX]]) @@ -2047,7 +2053,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; 
CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> [[TMP11]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2105,7 +2111,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <8 x i64> [[TMP4]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2143,7 +2149,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP9]] = add <8 x i64> [[TMP7]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <8 x i64> [[TMP9]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call 
i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[BIN_RDX]]) @@ -2180,7 +2186,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> [[TMP11]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2247,7 +2253,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP18]] = add <vscale x 2 x i64> [[TMP17]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP18]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] @@ -2301,7 +2307,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP28]] = add <vscale x 2 x i64> [[TMP26]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 
[[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i64> [[TMP28]], [[TMP27]] ; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[BIN_RDX]]) @@ -2343,7 +2349,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-MAXBW-NEXT: [[TMP20]] = add <vscale x 8 x i64> [[TMP17]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP20]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] @@ -2465,7 +2471,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum, ; CHECK-INTERLEAVE1-NEXT: [[TMP36]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> 
[[TMP36]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP33]]) @@ -2565,7 +2571,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum, ; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]]) ; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]]) @@ -2665,7 +2671,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum, ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]]) ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]]) ; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]]) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll 
b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll index 1ef5b20..db3166c 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll @@ -499,7 +499,8 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]]) ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP13]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @zext_add_reduc_i8_i32_predicated( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]]) #[[ATTR0]] { @@ -527,7 +528,8 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]]) ; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP13]] ; ; CHECK-MAXBW-LABEL: define i32 @zext_add_reduc_i8_i32_predicated( ; CHECK-MAXBW-SAME: ptr [[A:%.*]]) #[[ATTR0]] { @@ -555,7 +557,8 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: for.exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP12]] ; entry: br label %for.body @@ -674,7 +677,7 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = sub <16 x i32> [[VEC_PHI]], [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP4]]) ; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] @@ -700,7 +703,7 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-INTERLEAVED-NEXT: [[TMP7]] = sub <16 x i32> [[VEC_PHI1]], [[TMP5]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) @@ -726,7 +729,7 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-MAXBW-NEXT: [[TMP10]] = sub <vscale x 8 x i32> [[VEC_PHI]], [[TMP9]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[TMP10]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] @@ -768,7 +771,7 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: 
[[TMP4]] = add <16 x i32> [[TMP3]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP4]]) ; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] @@ -794,7 +797,7 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <16 x i32> [[TMP5]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) @@ -820,7 +823,7 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP9]]) ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP15:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] @@ -871,7 +874,7 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <16 x i32> [[VEC_PHI]], [[BROADCAST_SPLAT]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] @@ -906,7 +909,7 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <16 x i32> [[VEC_PHI2]], [[BROADCAST_SPLAT]] ; CHECK-INTERLEAVED-NEXT: [[TMP21]] = add nuw i32 [[VEC_PHI1]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP21]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) @@ -942,7 +945,7 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 { ; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 16 x i32> [[VEC_PHI]], [[BROADCAST_SPLAT]] 
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv16i32(<vscale x 16 x i32> [[TMP11]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] @@ -993,7 +996,7 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP6]] = add <16 x i32> [[VEC_PHI]], [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP6]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] @@ -1028,7 +1031,7 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP8]] = add <16 x i32> [[VEC_PHI2]], [[TMP3]] ; CHECK-INTERLEAVED-NEXT: [[TMP22]] = add nuw i32 [[VEC_PHI1]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP22]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 
x i32> [[TMP8]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) @@ -1064,7 +1067,7 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP9]]) ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll index c4feabe..edf7e28 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll @@ -50,22 +50,9 @@ define i32 @pr70988(ptr %src, i32 %n) { ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP17]], i32 [[TMP18]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDUC:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDUC_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[MAX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[TMP24:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[INDUC]] -; CHECK-NEXT: [[TMP22:%.*]] = load ptr, ptr [[GEP]], align 8 -; CHECK-NEXT: 
[[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4 -; CHECK-NEXT: [[TMP24]] = tail call i32 @llvm.smax.i32(i32 [[TMP23]], i32 [[MAX]]) -; CHECK-NEXT: [[INDUC_NEXT]] = add nuw nsw i64 [[INDUC]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDUC_NEXT]], [[UMAX]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[TMP24]], [[LOOP]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 [[RDX_MINMAX]] ; entry: %1 = and i32 %n, 15 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll index 0c7dc29..0f82de62 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll @@ -241,42 +241,8 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; PRED: [[MIDDLE_BLOCK]]: ; PRED-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP41]]) ; PRED-NEXT: br label %[[EXIT:.*]] -; PRED: [[SCALAR_PH:.*]]: -; PRED-NEXT: br label %[[LOOP:.*]] -; PRED: [[LOOP]]: -; PRED-NEXT: [[TMP45:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[TMP53:%.*]], %[[LOOP]] ] -; PRED-NEXT: [[SCALAR_RECUR10:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[TMP45]], %[[LOOP]] ] -; PRED-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[LOOP]] ] -; PRED-NEXT: [[SUM_RED:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[RED_2:%.*]], %[[LOOP]] ] -; PRED-NEXT: [[TMP52:%.*]] = add i64 [[Y]], 1 -; PRED-NEXT: [[GEP_1:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP52]] -; PRED-NEXT: [[TMP53]] = load i32, ptr [[GEP_1]], align 4 -; PRED-NEXT: [[OR3:%.*]] = or i32 [[SCALAR_RECUR10]], [[X]] -; PRED-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1 -; PRED-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 1 -; PRED-NEXT: [[TMP54:%.*]] = 
shl i32 [[OR3]], 1 -; PRED-NEXT: [[TMP55:%.*]] = or i32 [[TMP54]], 2 -; PRED-NEXT: [[SHL19:%.*]] = shl i32 [[X]], 1 -; PRED-NEXT: [[TMP56:%.*]] = or i32 [[SHR]], [[SHL19]] -; PRED-NEXT: [[TMP57:%.*]] = or i32 [[TMP56]], [[TMP55]] -; PRED-NEXT: [[TMP58:%.*]] = or i32 [[TMP57]], [[X]] -; PRED-NEXT: [[OR20:%.*]] = or i32 [[Z]], [[X]] -; PRED-NEXT: [[NOT:%.*]] = and i32 [[OR20]], 1 -; PRED-NEXT: [[AND:%.*]] = xor i32 [[NOT]], 1 -; PRED-NEXT: [[IDX_EXT_1:%.*]] = zext i32 [[AND]] to i64 -; PRED-NEXT: [[GEP_2:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[IDX_EXT_1]] -; PRED-NEXT: [[TMP59:%.*]] = load i32, ptr [[GEP_2]], align 4 -; PRED-NEXT: [[SHR24:%.*]] = lshr i32 [[TMP58]], 1 -; PRED-NEXT: [[IDX_EXT_2:%.*]] = zext i32 [[SHR24]] to i64 -; PRED-NEXT: [[GEP_3:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[IDX_EXT_2]] -; PRED-NEXT: [[TMP60:%.*]] = load i32, ptr [[GEP_3]], align 4 -; PRED-NEXT: [[RED_1:%.*]] = or i32 [[TMP59]], [[SUM_RED]] -; PRED-NEXT: [[RED_2]] = or i32 [[RED_1]], [[TMP60]] -; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV1]], [[Y]] -; PRED-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; PRED: [[EXIT]]: -; PRED-NEXT: [[RED_2_LCSSA:%.*]] = phi i32 [ [[RED_2]], %[[LOOP]] ], [ [[TMP44]], %[[MIDDLE_BLOCK]] ] -; PRED-NEXT: ret i32 [[RED_2_LCSSA]] +; PRED-NEXT: ret i32 [[TMP44]] ; entry: br label %loop @@ -497,21 +463,8 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 { ; PRED: [[MIDDLE_BLOCK]]: ; PRED-NEXT: [[TMP19:%.*]] = call i16 @llvm.vector.reduce.or.nxv8i16(<vscale x 8 x i16> [[TMP16]]) ; PRED-NEXT: br label %[[EXIT:.*]] -; PRED: [[SCALAR_PH:.*]]: -; PRED-NEXT: br label %[[LOOP:.*]] -; PRED: [[LOOP]]: -; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; PRED-NEXT: [[RED:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; PRED-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; PRED-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; PRED-NEXT: [[DIV:%.*]] = udiv i16 
[[L]], [[X]] -; PRED-NEXT: [[RED_NEXT]] = or i16 [[DIV]], [[RED]] -; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]] -; PRED-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; PRED: [[EXIT]]: -; PRED-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i16 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP19]], %[[MIDDLE_BLOCK]] ] -; PRED-NEXT: ret i16 [[RED_NEXT_LCSSA]] +; PRED-NEXT: ret i16 [[TMP19]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll index c15e8d4..ab9b48f 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll @@ -616,6 +616,45 @@ exit: ret double %red.next } +define i32 @test_ptr_iv_load_used_by_other_load(ptr %start, ptr %end) { +; CHECK-LABEL: define i32 @test_ptr_iv_load_used_by_other_load( +; CHECK-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ null, %[[ENTRY]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[IV]], align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 8 +; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[TMP1]], 0 +; CHECK-NEXT: [[C_EXT:%.*]] = zext i1 [[C]] to i32 +; CHECK-NEXT: [[RED_NEXT]] = or i32 [[RED]], [[C_EXT]] +; CHECK-NEXT: [[IV_NEXT]] = getelementptr nusw i8, ptr [[IV]], i64 32 +; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[IV]], [[END]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RED_LCSSA:%.*]] = phi i32 [ [[RED]], %[[LOOP]] ] +; CHECK-NEXT: ret i32 [[RED_LCSSA]] +; +entry: + br label %loop + +loop: ; preds = %loop, %entry + %iv = phi ptr [ %iv.next, %loop 
], [ null, %entry ] + %red = phi i32 [ %red.next, %loop ], [ 0, %entry ] + %0 = load ptr, ptr %iv, align 8 + %1 = load i8, ptr %0, align 8 + %c = icmp ne i8 %1, 0 + %c.ext = zext i1 %c to i32 + %red.next = or i32 %red, %c.ext + %iv.next = getelementptr nusw i8, ptr %iv, i64 32 + %ec = icmp eq ptr %iv, %end + br i1 %ec, label %exit, label %loop + +exit: + ret i32 %red +} + attributes #0 = { "target-cpu"="neoverse-512tvb" } !0 = !{!1, !2, i64 0} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll index 885c7904..5072058 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll @@ -144,20 +144,8 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-ORDERED-TF-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] -; CHECK-ORDERED-TF: scalar.ph: -; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP12]], [[SUM_07]] -; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[ADD_LCSSA:%.*]] = 
phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: ret float [[ADD_LCSSA]] +; CHECK-ORDERED-TF-NEXT: ret float [[TMP9]] ; @@ -390,23 +378,11 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP39]], i64 [[TMP6]]) ; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = extractelement <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = xor i1 [[TMP40]], true -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP41]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP41]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] -; CHECK-ORDERED-TF: scalar.ph: -; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP42]], [[SUM_07]] -; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP30]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: ret float [[ADD_LCSSA]] +; CHECK-ORDERED-TF-NEXT: ret float 
[[TMP30]] ; @@ -630,30 +606,12 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = xor i1 [[TMP17]], true -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] -; CHECK-ORDERED-TF: scalar.ph: -; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[ADD_PHI1:%.*]] = phi float [ [[A2]], [[SCALAR_PH:%.*]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[ADD_PHI2:%.*]] = phi float [ [[A1]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ADD1]] = fadd float [[TMP19]], [[ADD_PHI2]] -; CHECK-ORDERED-TF-NEXT: [[OR:%.*]] = or disjoint i64 [[IV]], 1 -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDXB2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OR]] -; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ADD2]] = fadd float [[TMP20]], [[ADD_PHI1]] -; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 2 -; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label 
[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP16]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: store float [[ADD1_LCSSA]], ptr [[A]], align 4 -; CHECK-ORDERED-TF-NEXT: store float [[ADD2_LCSSA]], ptr [[ARRAYIDXA]], align 4 +; CHECK-ORDERED-TF-NEXT: store float [[TMP16]], ptr [[A]], align 4 +; CHECK-ORDERED-TF-NEXT: store float [[TMP14]], ptr [[ARRAYIDXA]], align 4 ; CHECK-ORDERED-TF-NEXT: ret void ; @@ -863,28 +821,13 @@ define float @fadd_of_sum(ptr noalias nocapture readonly %a, ptr noalias nocaptu ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP7]]) ; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = xor i1 [[TMP13]], true -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END_LOOPEXIT:%.*]] -; CHECK-ORDERED-TF: scalar.ph: -; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-ORDERED-TF-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ] -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr 
[[B]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ADD:%.*]] = fadd float [[TMP15]], [[TMP16]] -; CHECK-ORDERED-TF-NEXT: [[RDX]] = fadd float [[RES_014]], [[ADD]] -; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-ORDERED-TF: for.end.loopexit: -; CHECK-ORDERED-TF-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ] ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[RDX_LCSSA]], [[FOR_END_LOOPEXIT]] ] +; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP12]], [[FOR_END_LOOPEXIT]] ] ; CHECK-ORDERED-TF-NEXT: ret float [[RES]] ; @@ -1081,31 +1024,11 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP6]]) ; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = xor i1 [[TMP13]], true -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] -; CHECK-ORDERED-TF: scalar.ph: -; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] 
] -; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 1.000000e+00, [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ] -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-ORDERED-TF-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP15]], 0.000000e+00 -; CHECK-ORDERED-TF-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; CHECK-ORDERED-TF: if.then: -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-ORDERED-TF-NEXT: br label [[FOR_INC]] -; CHECK-ORDERED-TF: for.inc: -; CHECK-ORDERED-TF-NEXT: [[PHI:%.*]] = phi float [ [[TMP16]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[FADD]] = fadd float [[RES]], [[PHI]] -; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: ret float [[RDX]] +; CHECK-ORDERED-TF-NEXT: ret float [[TMP12]] ; @@ -1245,7 +1168,7 @@ define float @fadd_multiple(ptr noalias nocapture %a, ptr noalias nocapture %b, ; CHECK-ORDERED-TF-NEXT: [[ADD3]] = fadd float [[ADD]], [[TMP1]] ; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK-ORDERED-TF: for.end: 
; CHECK-ORDERED-TF-NEXT: [[RDX:%.*]] = phi float [ [[ADD3]], [[FOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: ret float [[RDX]] @@ -1542,25 +1465,11 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 { ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP53]], i64 [[TMP6]]) ; CHECK-ORDERED-TF-NEXT: [[TMP54:%.*]] = extractelement <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-ORDERED-TF-NEXT: [[TMP55:%.*]] = xor i1 [[TMP54]], true -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] -; CHECK-ORDERED-TF: scalar.ph: -; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP56:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP57:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-ORDERED-TF-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP56]], float [[TMP57]], float [[SUM_07]]) -; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: 
[[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP44]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: ret float [[MULADD_LCSSA]] +; CHECK-ORDERED-TF-NEXT: ret float [[TMP44]] ; @@ -1852,25 +1761,11 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 { ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP53]], i64 [[TMP6]]) ; CHECK-ORDERED-TF-NEXT: [[TMP54:%.*]] = extractelement <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-ORDERED-TF-NEXT: [[TMP55:%.*]] = xor i1 [[TMP54]], true -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] -; CHECK-ORDERED-TF: scalar.ph: -; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP56:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP57:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-ORDERED-TF-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP56]], float [[TMP57]], float [[SUM_07]]) -; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], 
!llvm.loop [[LOOP19:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP44]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: ret float [[MULADD_LCSSA]] +; CHECK-ORDERED-TF-NEXT: ret float [[TMP44]] ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll b/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll index 4e989c5..3b016f8 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll @@ -129,20 +129,8 @@ define i64 @same_exit_block_pre_inc_use4() { ; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP8]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i64, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i64 [[INDEX]], [[LD1]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -203,21 +191,8 @@ define i64 @loop_contains_safe_call() #1 { ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: 
[[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP9]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load float, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[SQRT:%.*]] = tail call fast float @llvm.sqrt.f32(float [[LD1]]) -; CHECK-NEXT: [[CMP:%.*]] = fcmp fast ult float [[SQRT]], 3.000000e+00 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -365,22 +340,8 @@ define i64 @loop_contains_load_after_early_exit(ptr dereferenceable(1024) align( ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP11]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[LD1]], 1 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr 
[[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i64, ptr [[ARRAYIDX2]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[LD2]], [[LOOP_INC]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll index 79fb3fd..c775b44 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll @@ -88,16 +88,7 @@ define void @cost_store_i8(ptr %dst) #0 { ; PRED-NEXT: [[TMP12:%.*]] = xor i1 [[TMP14]], true ; PRED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; PRED: middle.block: -; PRED-NEXT: br label [[EXIT:%.*]] -; PRED: scalar.ph: ; PRED-NEXT: br label [[LOOP:%.*]] -; PRED: loop: -; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; PRED-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]] -; PRED-NEXT: store i8 0, ptr [[GEP]], align 1 -; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 100 -; PRED-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; PRED: exit: ; PRED-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll index 3f230b7..e084307 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll @@ -490,8 +490,7 @@ 
define float @fadd_predicated(ptr noalias nocapture %a, i64 %n) { ; CHECK-ORDERED: %[[MASK:.*]] = select <2 x i1> %0, <2 x float> %[[PHI]], <2 x float> splat (float -0.000000e+00) ; CHECK-ORDERED: %[[RDX]] = call float @llvm.vector.reduce.fadd.v2f32(float %[[RDX_PHI]], <2 x float> %[[MASK]]) ; CHECK-ORDERED: for.end: -; CHECK-ORDERED: %[[RES_PHI:.*]] = phi float [ %[[FADD:.*]], %for.body ], [ %[[RDX]], %middle.block ] -; CHECK-ORDERED: ret float %[[RES_PHI]] +; CHECK-ORDERED: ret float %[[RDX]] ; CHECK-UNORDERED-LABEL: @fadd_predicated ; CHECK-UNORDERED: vector.ph @@ -507,12 +506,8 @@ define float @fadd_predicated(ptr noalias nocapture %a, i64 %n) { ; CHECK-UNORDERED-NOT: call float @llvm.vector.reduce.fadd ; CHECK-UNORDERED: middle.block ; CHECK-UNORDERED: %[[RDX:.*]] = call float @llvm.vector.reduce.fadd.v2f32(float -0.000000e+00, <2 x float> %[[MASK]]) -; CHECK-UNORDERED: for.body -; CHECK-UNORDERED: %[[LOAD:.*]] = load float, ptr -; CHECK-UNORDERED: %[[FADD2:.*]] = fadd float {{.*}}, %[[LOAD]] ; CHECK-UNORDERED: for.end -; CHECK-UNORDERED: %[[SUM:.*]] = phi float [ %[[FADD2]], %for.body ], [ %[[RDX]], %middle.block ] -; CHECK-UNORDERED: ret float %[[SUM]] +; CHECK-UNORDERED: ret float %[[RDX]] ; CHECK-NOT-VECTORIZED-LABEL: @fadd_predicated ; CHECK-NOT-VECTORIZED-NOT: vector.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll index bdbbfdf..9526a84 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll @@ -31,10 +31,7 @@ define void @struct_return_widen(ptr noalias %in, ptr noalias writeonly %out_a, ; CHECK: [[VECTOR_BODY:.*:]] ; CHECK: [[TMP2:%.*]] = call { <2 x half>, <2 x half> } @fixed_vec_foo(<2 x half> [[WIDE_LOAD:%.*]]) ; CHECK: [[TMP3:%.*]] = call { <2 x half>, <2 x half> } @fixed_vec_foo(<2 x half> [[WIDE_LOAD1:%.*]]) -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: 
[[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { half, half } @foo(half [[IN_VAL:%.*]]) #[[ATTR2:[0-9]+]] ; CHECK: [[EXIT:.*:]] ; entry: @@ -82,12 +79,9 @@ define void @struct_return_replicate(ptr noalias %in, ptr noalias writeonly %out ; CHECK: [[ENTRY:.*:]] ; CHECK: [[VECTOR_PH:.*:]] ; CHECK: [[VECTOR_BODY:.*:]] -; CHECK: [[TMP2:%.*]] = tail call { half, half } @foo(half [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] -; CHECK: [[TMP4:%.*]] = tail call { half, half } @foo(half [[TMP3:%.*]]) #[[ATTR3]] +; CHECK: [[TMP2:%.*]] = tail call { half, half } @foo(half [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] +; CHECK: [[TMP4:%.*]] = tail call { half, half } @foo(half [[TMP3:%.*]]) #[[ATTR2]] ; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] -; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { half, half } @foo(half [[IN_VAL:%.*]]) #[[ATTR3]] ; CHECK: [[EXIT:.*:]] ; entry: @@ -162,7 +156,7 @@ define void @struct_return_scalable(ptr noalias %in, ptr noalias writeonly %out_ ; CHECK: [[MIDDLE_BLOCK:.*:]] ; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { half, half } @foo(half [[IN_VAL:%.*]]) #[[ATTR3]] +; CHECK: [[CALL:%.*]] = tail call { half, half } @foo(half [[IN_VAL:%.*]]) #[[ATTR2]] ; CHECK: [[EXIT:.*:]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll index 8d33ccb..bbc0e33 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll @@ -49,7 +49,7 @@ define void @main_vf_vscale_x_16(ptr %A) #0 { ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 
[[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] @@ -59,7 +59,7 @@ define void @main_vf_vscale_x_16(ptr %A) #0 { ; CHECK-NEXT: store <8 x i8> splat (i8 1), ptr [[TMP9]], align 1 ; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024 -; CHECK-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK: vec.epilog.scalar.ph: @@ -97,7 +97,7 @@ define void @main_vf_vscale_x_16(ptr %A) #0 { ; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK-VF8: vec.epilog.iter.check: ; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK-VF8: vec.epilog.ph: ; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-VF8-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] @@ -107,7 +107,7 @@ define void @main_vf_vscale_x_16(ptr %A) #0 { ; CHECK-VF8-NEXT: store <8 x i8> splat (i8 1), ptr [[TMP9]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8 ; CHECK-VF8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024 -; 
CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-VF8: vec.epilog.middle.block: ; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK-VF8: vec.epilog.scalar.ph: @@ -150,7 +150,7 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) { ; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP5]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH:%.*]] @@ -182,13 +182,13 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) { ; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP7]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-VF8-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-VF8-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-VF8: middle.block: ; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK-VF8: vec.epilog.iter.check: ; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; CHECK-VF8-NEXT: br i1 
[[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK-VF8: vec.epilog.ph: ; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-VF8-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] @@ -198,7 +198,7 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) { ; CHECK-VF8-NEXT: store <8 x i64> splat (i64 1), ptr [[TMP9]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8 ; CHECK-VF8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024 -; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK-VF8: vec.epilog.middle.block: ; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK-VF8: vec.epilog.scalar.ph: @@ -261,13 +261,13 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) { ; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP7]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; CHECK-NEXT: br i1 
[[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[N]], 8 @@ -279,7 +279,7 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) { ; CHECK-NEXT: store <8 x i64> splat (i64 1), ptr [[TMP9]], align 1 ; CHECK-NEXT: [[INDEX_NEXT5]] = add nuw i64 [[INDEX4]], 8 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT5]], [[N_VEC3]] -; CHECK-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[CMP_N6:%.*]] = icmp eq i64 [[N]], [[N_VEC3]] ; CHECK-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -313,13 +313,13 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) { ; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP7]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-VF8-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-VF8-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-VF8: middle.block: ; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK-VF8: vec.epilog.iter.check: ; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; 
CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK-VF8: vec.epilog.ph: ; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-VF8-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[N]], 8 @@ -331,7 +331,7 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) { ; CHECK-VF8-NEXT: store <8 x i64> splat (i64 1), ptr [[TMP9]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT5]] = add nuw i64 [[INDEX4]], 8 ; CHECK-VF8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT5]], [[N_VEC3]] -; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-VF8: vec.epilog.middle.block: ; CHECK-VF8-NEXT: [[CMP_N6:%.*]] = icmp eq i64 [[N]], [[N_VEC3]] ; CHECK-VF8-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -382,14 +382,14 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 10000, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], 
i64 [[N_VEC]] ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[START]], i64 10000 @@ -400,7 +400,7 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-NEXT: store <8 x i8> zeroinitializer, ptr [[NEXT_GEP2]], align 1 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 8 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 10000 -; CHECK-NEXT: br i1 [[TMP9]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK: vec.epilog.scalar.ph: @@ -433,14 +433,14 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-VF8-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP6]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-VF8-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-VF8-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-VF8: middle.block: ; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 10000, [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK-VF8: vec.epilog.iter.check: ; CHECK-VF8-NEXT: 
[[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]] ; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK-VF8: vec.epilog.ph: ; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-VF8-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[START]], i64 10000 @@ -451,7 +451,7 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-VF8-NEXT: store <8 x i8> zeroinitializer, ptr [[NEXT_GEP2]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 8 ; CHECK-VF8-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 10000 -; CHECK-VF8-NEXT: br i1 [[TMP9]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP9]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-VF8: vec.epilog.middle.block: ; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK-VF8: vec.epilog.scalar.ph: @@ -514,13 +514,13 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1 ; CHECK-NEXT: store <vscale x 4 x float> [[TMP13]], ptr [[TMP11]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label 
[[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF15:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[N_MOD_VF4:%.*]] = urem i64 [[N]], 2 @@ -536,7 +536,7 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1 ; CHECK-NEXT: store <2 x float> [[TMP20]], ptr [[TMP19]], align 4 ; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX6]], 2 ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC5]] -; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[CMP_N10:%.*]] = icmp eq i64 [[N]], [[N_VEC5]] ; CHECK-NEXT: br i1 [[CMP_N10]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -576,7 +576,7 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1 ; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP11]], ptr [[TMP9]], align 4 ; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] ; CHECK-VF8-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-VF8-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-VF8: middle.block: ; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; 
CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH:%.*]] @@ -606,18 +606,12 @@ exit: } ; Loop with vscale-based trip count vscale x 1024. -; TODO: No epilogue vectorizations should remain when choosing VF = vscale x 4. define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalias %b) vscale_range(1, 16) #0 { ; CHECK-LABEL: @trip_count_vscale_no_epilogue_iterations( -; CHECK-NEXT: iter.check: +; CHECK-NEXT: entry: ; CHECK-NEXT: [[V:%.*]] = tail call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[N:%.*]] = mul nuw nsw i64 [[V]], 1024 -; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] -; CHECK: vector.main.loop.iter.check: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 3 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 @@ -625,7 +619,7 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[A:%.*]], i64 [[INDEX]] ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 2 @@ -644,31 +638,11 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia ; CHECK-NEXT: store <vscale x 4 x float> [[TMP13]], ptr [[TMP11]], 
align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] -; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] -; CHECK: vec.epilog.ph: -; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] -; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] -; CHECK: vec.epilog.vector.body: -; CHECK-NEXT: [[INDEX4:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT7:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX4]] -; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x float>, ptr [[TMP18]], align 4 -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX4]] -; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x float>, ptr [[TMP19]], align 4 -; CHECK-NEXT: [[TMP20:%.*]] = fmul <2 x float> [[WIDE_LOAD5]], [[WIDE_LOAD6]] -; CHECK-NEXT: store <2 x float> [[TMP20]], ptr [[TMP19]], align 4 -; CHECK-NEXT: [[INDEX_NEXT7]] = add nuw i64 [[INDEX4]], 2 -; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT7]], [[N]] -; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] -; CHECK: vec.epilog.middle.block: -; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] -; 
CHECK: vec.epilog.scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] +; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: ; @@ -703,7 +677,7 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia ; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP11]], ptr [[TMP9]], align 4 ; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] ; CHECK-VF8-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-VF8-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-VF8: middle.block: ; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH:%.*]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll index 33b3629..3b0bd87 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll @@ -116,7 +116,8 @@ define dso_local void @masked_strided1(ptr noalias nocapture readonly %p, ptr no ; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP19]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP0:![0-9]+]] ; PREDICATED_TAIL_FOLDING: middle.block: ; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_TAIL_FOLDING: scalar.ph: +; PREDICATED_TAIL_FOLDING: for.end: +; PREDICATED_TAIL_FOLDING-NEXT: ret void ; entry: %conv = zext i8 %guard to i32 @@ -243,10 +244,11 @@ define dso_local void @masked_strided2(ptr noalias nocapture 
readnone %p, ptr no ; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]]) ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0 ; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]] -; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP16]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]] +; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP16]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP3:![0-9]+]] ; PREDICATED_TAIL_FOLDING: middle.block: ; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_TAIL_FOLDING: scalar.ph: +; PREDICATED_TAIL_FOLDING: for.end: +; PREDICATED_TAIL_FOLDING-NEXT: ret void ; entry: %conv = zext i8 %guard to i32 @@ -377,10 +379,11 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no ; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]]) ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0 ; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT4]] -; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP18]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP6:![0-9]+]] +; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP18]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]] ; PREDICATED_TAIL_FOLDING: middle.block: ; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_TAIL_FOLDING: scalar.ph: +; PREDICATED_TAIL_FOLDING: for.end: +; PREDICATED_TAIL_FOLDING-NEXT: ret void ; entry: %conv = zext i8 %guard1 to i32 @@ -537,10 +540,11 @@ define dso_local void @masked_strided_factor4(ptr noalias nocapture 
readonly %p, ; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]]) ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP23:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0 ; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]] -; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP23]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP8:![0-9]+]] +; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP23]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP5:![0-9]+]] ; PREDICATED_TAIL_FOLDING: middle.block: ; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_TAIL_FOLDING: scalar.ph: +; PREDICATED_TAIL_FOLDING: for.end: +; PREDICATED_TAIL_FOLDING-NEXT: ret void ; entry: %conv = zext i8 %guard to i32 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll index 16acd3f..b8b4fbd 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll @@ -69,7 +69,8 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll index 069d369..cb2c003 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll @@ -29,7 +29,8 @@ define void @trip1025_i64(ptr 
noalias nocapture noundef %dst, ptr noalias nocapt ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: +; CHECK: for.end: +; CHECK-NEXT: ret void ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll index 61448bd..33ee0d6 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll @@ -33,7 +33,10 @@ define void @cannot_overflow_i32_induction_var(ptr noalias %dst, ptr readonly %s ; CHECK-NEXT: br i1 [[TMP5]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: for.cond.cleanup.loopexit: +; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret void ; entry: %cmp6.not = icmp eq i32 %N, 0 @@ -87,10 +90,13 @@ define void @can_overflow_i64_induction_var(ptr noalias %dst, ptr readonly %src, ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP7]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP2]]) ; CHECK-NEXT: [[TMP8:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0 -; CHECK-NEXT: br i1 [[TMP8]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: for.cond.cleanup.loopexit: +; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] +; CHECK: for.cond.cleanup: +; 
CHECK-NEXT: ret void ; entry: %cmp6.not = icmp eq i64 %N, 0 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll index b725669f..b5544dc 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll @@ -36,21 +36,9 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP14]]) -; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[WHILE_BODY:%.*]] -; CHECK: while.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[VAL]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; CHECK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: while.end.loopexit: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], [[WHILE_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP19]] ; ; CHECK-IN-LOOP-LABEL: @add_reduction_i32( ; CHECK-IN-LOOP-NEXT: entry: @@ -81,21 +69,9 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 { ; CHECK-IN-LOOP-NEXT: [[TMP19:%.*]] = xor i1 [[TMP18]], true ; CHECK-IN-LOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP0:![0-9]+]] ; CHECK-IN-LOOP: middle.block: -; CHECK-IN-LOOP-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK-IN-LOOP: scalar.ph: ; CHECK-IN-LOOP-NEXT: br label [[WHILE_BODY:%.*]] -; CHECK-IN-LOOP: while.body: -; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-IN-LOOP-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] -; CHECK-IN-LOOP-NEXT: [[VAL:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-IN-LOOP-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[VAL]] -; CHECK-IN-LOOP-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; CHECK-IN-LOOP-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; CHECK-IN-LOOP-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-IN-LOOP: while.end.loopexit: -; CHECK-IN-LOOP-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], [[WHILE_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; CHECK-IN-LOOP-NEXT: ret i32 [[RED_NEXT_LCSSA]] +; CHECK-IN-LOOP-NEXT: ret i32 [[TMP15]] ; entry: br label %while.body @@ -141,23 +117,11 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP18:%.*]] = xor i1 [[TMP17]], true -; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[WHILE_BODY:%.*]] -; CHECK: while.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 
[[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[PTR]], i64 [[INDEX]] -; CHECK-NEXT: [[VAL:%.*]] = load float, ptr [[GEP]], align 4 -; CHECK-NEXT: [[RED_NEXT]] = fadd float [[RED]], [[VAL]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; CHECK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: while.end.loopexit: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], [[WHILE_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[TMP14]] ; ; CHECK-IN-LOOP-LABEL: @add_reduction_f32( ; CHECK-IN-LOOP-NEXT: entry: @@ -185,23 +149,11 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 { ; CHECK-IN-LOOP-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-IN-LOOP-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-IN-LOOP-NEXT: [[TMP18:%.*]] = xor i1 [[TMP17]], true -; CHECK-IN-LOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-IN-LOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-IN-LOOP: middle.block: -; CHECK-IN-LOOP-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK-IN-LOOP: scalar.ph: ; CHECK-IN-LOOP-NEXT: br label [[WHILE_BODY:%.*]] -; CHECK-IN-LOOP: while.body: -; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ] -; CHECK-IN-LOOP-NEXT: [[GEP:%.*]] = 
getelementptr float, ptr [[PTR]], i64 [[INDEX]] -; CHECK-IN-LOOP-NEXT: [[VAL:%.*]] = load float, ptr [[GEP]], align 4 -; CHECK-IN-LOOP-NEXT: [[RED_NEXT]] = fadd float [[RED]], [[VAL]] -; CHECK-IN-LOOP-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; CHECK-IN-LOOP-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; CHECK-IN-LOOP-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-IN-LOOP: while.end.loopexit: -; CHECK-IN-LOOP-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], [[WHILE_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] -; CHECK-IN-LOOP-NEXT: ret float [[RED_NEXT_LCSSA]] +; CHECK-IN-LOOP-NEXT: ret float [[TMP14]] ; entry: br label %while.body @@ -251,32 +203,12 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 { ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP16:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP18:%.*]] = xor i1 [[TMP16]], true -; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP20]]) -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 7, [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]] -; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP26]], 5 -; CHECK-NEXT: br 
i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; CHECK: if.then: -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[RDX]], [[TMP27]] -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: [[RES]] = phi i32 [ [[RDX]], [[FOR_BODY]] ], [ [[XOR]], [[IF_THEN]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: for.end: -; CHECK-NEXT: [[RES_LCSSA:%.*]] = phi i32 [ [[RES]], [[FOR_INC]] ], [ [[TMP25]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RES_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP25]] ; ; CHECK-IN-LOOP-LABEL: @cond_xor_reduction( ; CHECK-IN-LOOP-NEXT: entry: @@ -308,31 +240,11 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 { ; CHECK-IN-LOOP-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-IN-LOOP-NEXT: [[TMP22:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-IN-LOOP-NEXT: [[TMP23:%.*]] = xor i1 [[TMP22]], true -; CHECK-IN-LOOP-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-IN-LOOP-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-IN-LOOP: middle.block: -; CHECK-IN-LOOP-NEXT: br label [[FOR_END:%.*]] -; CHECK-IN-LOOP: scalar.ph: -; CHECK-IN-LOOP-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-IN-LOOP: for.body: -; CHECK-IN-LOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-IN-LOOP-NEXT: [[RDX:%.*]] = phi i32 [ 7, [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ] -; 
CHECK-IN-LOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]] -; CHECK-IN-LOOP-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-IN-LOOP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP24]], 5 -; CHECK-IN-LOOP-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; CHECK-IN-LOOP: if.then: -; CHECK-IN-LOOP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-IN-LOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-IN-LOOP-NEXT: [[XOR:%.*]] = xor i32 [[RDX]], [[TMP25]] -; CHECK-IN-LOOP-NEXT: br label [[FOR_INC]] -; CHECK-IN-LOOP: for.inc: -; CHECK-IN-LOOP-NEXT: [[RES]] = phi i32 [ [[RDX]], [[FOR_BODY]] ], [ [[XOR]], [[IF_THEN]] ] -; CHECK-IN-LOOP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-IN-LOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-IN-LOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-IN-LOOP-NEXT: br label [[FOR_INC:%.*]] ; CHECK-IN-LOOP: for.end: -; CHECK-IN-LOOP-NEXT: [[RES_LCSSA:%.*]] = phi i32 [ [[RES]], [[FOR_INC]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ] -; CHECK-IN-LOOP-NEXT: ret i32 [[RES_LCSSA]] +; CHECK-IN-LOOP-NEXT: ret i32 [[TMP19]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll index 1879386..5531b3c 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll @@ -72,7 +72,8 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: br i1 [[TMP36]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -176,10 
+177,11 @@ define void @cond_memset(i32 %val, ptr noalias readonly %cond_ptr, ptr noalias % ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP93]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP66:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP67:%.*]] = xor i1 [[TMP66]], true -; CHECK-NEXT: br i1 [[TMP67]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP67]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll index ec17872..9ebe790 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll @@ -33,7 +33,8 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -73,10 +74,11 @@ define void @simple_memset_v4i32(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX1]], i64 [[TMP2]]) ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP5:%.*]] = xor i1 [[TMP6]], true -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -120,10 +122,11 @@ define void @simple_memcpy(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP14:%.*]] = xor i1 [[TMP12]], true -; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -180,10 +183,11 @@ define void @copy_stride4(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK-NEXT: [[TMP21:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP22:%.*]] = xor i1 [[TMP21]], true ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] -; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -231,10 +235,11 @@ define void @simple_gather_scatter(ptr noalias %dst, ptr noalias %src, ptr noali ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP15:%.*]] = extractelement 
<vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP16:%.*]] = xor i1 [[TMP15]], true -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -284,10 +289,11 @@ define void @uniform_load(ptr noalias %dst, ptr noalias readonly %src, i64 %n) # ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP13:%.*]] = xor i1 [[TMP14]], true -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: +; CHECK: for.end: +; CHECK-NEXT: ret void ; entry: @@ -342,10 +348,11 @@ define void @cond_uniform_load(ptr noalias %dst, ptr noalias readonly %src, ptr ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP18:%.*]] = xor i1 [[TMP17]], true -; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: +; CHECK: for.end: +; CHECK-NEXT: ret void ; entry: @@ 
-403,10 +410,11 @@ define void @uniform_store(ptr noalias %dst, ptr noalias readonly %src, i64 %n) ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP13:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP12:%.*]] = xor i1 [[TMP13]], true -; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: +; CHECK: for.end: +; CHECK-NEXT: ret void ; entry: @@ -454,10 +462,11 @@ define void @simple_fdiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP13:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP14:%.*]] = xor i1 [[TMP13]], true -; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -509,10 +518,11 @@ define void @simple_idiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP17:%.*]] = xor i1 [[TMP14]], true -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -551,7 +561,7 @@ define void @simple_memset_trip1024(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: store <vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP7]], align 4 ; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP3]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END_LOOPEXIT:%.*]], label [[SCALAR_PH:%.*]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll index e7d25a0..742097b 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll @@ -562,7 +562,8 @@ define void @simple_histogram_tailfold(ptr noalias %buckets, ptr readonly %indic ; CHECK-NEXT: br i1 [[TMP11]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: for.exit: +; CHECK-NEXT: ret void ; entry: br label %for.body @@ -626,7 +627,7 @@ define void @simple_histogram_rtdepcheck(ptr noalias %buckets, ptr %array, ptr % ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: 
br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]] @@ -719,7 +720,7 @@ define void @simple_histogram_64b(ptr noalias %buckets, ptr readonly %indices, i ; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> [[TMP6]], i64 1, <vscale x 2 x i1> splat (i1 true)) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/synthesize-mask-for-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/synthesize-mask-for-call.ll index e6ff39b..6da3c77c 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/synthesize-mask-for-call.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/synthesize-mask-for-call.ll @@ -317,19 +317,7 @@ define void @test_v4_v4m(ptr noalias %a, ptr readonly %b) #3 { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; 
CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 -; CHECK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR1:[0-9]+]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; @@ -369,19 +357,7 @@ define void @test_v2_v4m(ptr noalias %a, ptr readonly %b) #3 { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 -; CHECK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR2:[0-9]+]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; @@ -421,19 +397,7 @@ define void @test_v2_v4(ptr noalias %a, ptr readonly %b) #3 { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP6]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 -; CHECK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR3:[0-9]+]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll index c44db7d..1607755 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll @@ -71,16 +71,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]] ; DATA-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; DATA: middle.block: -; DATA-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; DATA: scalar.ph: ; DATA-NEXT: br label [[WHILE_BODY:%.*]] -; DATA: while.body: -; DATA-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; DATA-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] -; DATA-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4 -; DATA-NEXT: [[INDEX_NEXT]] = 
add nsw i64 [[INDEX]], 1 -; DATA-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; DATA-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; DATA: while.end.loopexit: ; DATA-NEXT: ret void ; @@ -115,16 +106,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_NO_LANEMASK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC]] ; DATA_NO_LANEMASK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; DATA_NO_LANEMASK: middle.block: -; DATA_NO_LANEMASK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; DATA_NO_LANEMASK: scalar.ph: ; DATA_NO_LANEMASK-NEXT: br label [[WHILE_BODY:%.*]] -; DATA_NO_LANEMASK: while.body: -; DATA_NO_LANEMASK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; DATA_NO_LANEMASK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] -; DATA_NO_LANEMASK-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4 -; DATA_NO_LANEMASK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; DATA_NO_LANEMASK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; DATA_NO_LANEMASK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; DATA_NO_LANEMASK: while.end.loopexit: ; DATA_NO_LANEMASK-NEXT: ret void ; @@ -150,16 +132,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_AND_CONTROL-NEXT: [[TMP7:%.*]] = xor i1 [[TMP6]], true ; DATA_AND_CONTROL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; DATA_AND_CONTROL: middle.block: -; DATA_AND_CONTROL-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; DATA_AND_CONTROL: scalar.ph: ; DATA_AND_CONTROL-NEXT: br label [[WHILE_BODY:%.*]] -; DATA_AND_CONTROL: while.body: -; DATA_AND_CONTROL-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] 
-; DATA_AND_CONTROL-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] -; DATA_AND_CONTROL-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4 -; DATA_AND_CONTROL-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; DATA_AND_CONTROL-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; DATA_AND_CONTROL-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; DATA_AND_CONTROL: while.end.loopexit: ; DATA_AND_CONTROL-NEXT: ret void ; @@ -190,16 +163,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP12:%.*]] = xor i1 [[TMP15]], true ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; DATA_AND_CONTROL_NO_RT_CHECK: middle.block: -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; DATA_AND_CONTROL_NO_RT_CHECK: scalar.ph: ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br label [[WHILE_BODY:%.*]] -; DATA_AND_CONTROL_NO_RT_CHECK: while.body: -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4 -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; DATA_AND_CONTROL_NO_RT_CHECK: while.end.loopexit: ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll 
b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll index 038330b..c261760 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll @@ -22,21 +22,6 @@ define void @load_store_interleave_group_tc_2(ptr noalias %data) { ; VF2-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF2: [[MIDDLE_BLOCK]]: ; VF2-NEXT: br label %[[EXIT:.*]] -; VF2: [[SCALAR_PH:.*]]: -; VF2-NEXT: br label %[[LOOP:.*]] -; VF2: [[LOOP]]: -; VF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2-NEXT: [[MUL_2:%.*]] = shl nsw i64 [[IV]], 1 -; VF2-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[MUL_2]] -; VF2-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8 -; VF2-NEXT: store i64 [[L_0]], ptr [[DATA_0]], align 8 -; VF2-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[MUL_2]], 1 -; VF2-NEXT: [[DATA_1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[ADD_1]] -; VF2-NEXT: [[L_1:%.*]] = load i64, ptr [[DATA_1]], align 8 -; VF2-NEXT: store i64 [[L_1]], ptr [[DATA_1]], align 8 -; VF2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; VF2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 2 -; VF2-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; VF2: [[EXIT]]: ; VF2-NEXT: ret void ; @@ -86,33 +71,18 @@ define void @load_store_interleave_group_tc_2(ptr noalias %data) { ; VF4-NEXT: br i1 false, label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]] ; VF4: [[PRED_STORE_IF5]]: ; VF4-NEXT: [[TMP27:%.*]] = shl nsw i64 3, 1 -; VF4-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP27]] -; VF4-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8 -; VF4-NEXT: store i64 [[TMP29]], ptr [[TMP28]], align 8 -; VF4-NEXT: [[TMP30:%.*]] = or disjoint i64 [[TMP27]], 1 -; VF4-NEXT: [[TMP31:%.*]] = getelementptr 
inbounds i64, ptr [[DATA]], i64 [[TMP30]] -; VF4-NEXT: [[TMP32:%.*]] = load i64, ptr [[TMP31]], align 8 -; VF4-NEXT: store i64 [[TMP32]], ptr [[TMP31]], align 8 -; VF4-NEXT: br label %[[PRED_STORE_CONTINUE6]] -; VF4: [[PRED_STORE_CONTINUE6]]: -; VF4-NEXT: br label %[[MIDDLE_BLOCK:.*]] -; VF4: [[MIDDLE_BLOCK]]: -; VF4-NEXT: br label %[[EXIT:.*]] -; VF4: [[SCALAR_PH:.*]]: -; VF4-NEXT: br label %[[LOOP:.*]] -; VF4: [[LOOP]]: -; VF4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF4-NEXT: [[MUL_2:%.*]] = shl nsw i64 [[IV]], 1 -; VF4-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[MUL_2]] +; VF4-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP27]] ; VF4-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8 ; VF4-NEXT: store i64 [[L_0]], ptr [[DATA_0]], align 8 -; VF4-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[MUL_2]], 1 +; VF4-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[TMP27]], 1 ; VF4-NEXT: [[DATA_1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[ADD_1]] ; VF4-NEXT: [[L_1:%.*]] = load i64, ptr [[DATA_1]], align 8 ; VF4-NEXT: store i64 [[L_1]], ptr [[DATA_1]], align 8 -; VF4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; VF4-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 2 -; VF4-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] +; VF4-NEXT: br label %[[PRED_STORE_CONTINUE6]] +; VF4: [[PRED_STORE_CONTINUE6]]: +; VF4-NEXT: br label %[[MIDDLE_BLOCK:.*]] +; VF4: [[MIDDLE_BLOCK]]: +; VF4-NEXT: br label %[[EXIT:.*]] ; VF4: [[EXIT]]: ; VF4-NEXT: ret void ; @@ -237,27 +207,6 @@ define void @test_complex_add_float_tc_4(ptr %res, ptr noalias %A, ptr noalias % ; VF2-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VF2: [[MIDDLE_BLOCK]]: ; VF2-NEXT: br label %[[EXIT:.*]] -; VF2: [[SCALAR_PH:.*]]: -; VF2-NEXT: br label %[[LOOP:.*]] -; VF2: [[LOOP]]: -; VF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; 
VF2-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i64 [[IV]] -; VF2-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i64 [[IV]] -; VF2-NEXT: [[L_A_0:%.*]] = load float, ptr [[GEP_A_0]], align 4 -; VF2-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_A_0]], i64 4 -; VF2-NEXT: [[L_A_1:%.*]] = load float, ptr [[GEP_A_1]], align 4 -; VF2-NEXT: [[L_B_0:%.*]] = load float, ptr [[GEP_B_0]], align 4 -; VF2-NEXT: [[ADD_0:%.*]] = fadd float [[L_A_0]], [[L_B_0]] -; VF2-NEXT: [[GEP_B_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_B_0]], i64 4 -; VF2-NEXT: [[L_B_1:%.*]] = load float, ptr [[GEP_B_1]], align 4 -; VF2-NEXT: [[ADD_1:%.*]] = fadd float [[L_A_1]], [[L_B_1]] -; VF2-NEXT: [[GEP_RES_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RES]], i64 [[IV]] -; VF2-NEXT: store float [[ADD_0]], ptr [[GEP_RES_0]], align 4 -; VF2-NEXT: [[GEP_RES_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_RES_0]], i64 4 -; VF2-NEXT: store float [[ADD_1]], ptr [[GEP_RES_1]], align 4 -; VF2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; VF2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 4 -; VF2-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; VF2: [[EXIT]]: ; VF2-NEXT: ret void ; @@ -282,27 +231,6 @@ define void @test_complex_add_float_tc_4(ptr %res, ptr noalias %A, ptr noalias % ; VF4-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF4: [[MIDDLE_BLOCK]]: ; VF4-NEXT: br label %[[EXIT:.*]] -; VF4: [[SCALAR_PH:.*]]: -; VF4-NEXT: br label %[[LOOP:.*]] -; VF4: [[LOOP]]: -; VF4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF4-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i64 [[IV]] -; VF4-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i64 [[IV]] -; VF4-NEXT: [[L_A_0:%.*]] = load float, ptr [[GEP_A_0]], align 4 -; VF4-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_A_0]], i64 4 -; 
VF4-NEXT: [[L_A_1:%.*]] = load float, ptr [[GEP_A_1]], align 4 -; VF4-NEXT: [[L_B_0:%.*]] = load float, ptr [[GEP_B_0]], align 4 -; VF4-NEXT: [[ADD_0:%.*]] = fadd float [[L_A_0]], [[L_B_0]] -; VF4-NEXT: [[GEP_B_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_B_0]], i64 4 -; VF4-NEXT: [[L_B_1:%.*]] = load float, ptr [[GEP_B_1]], align 4 -; VF4-NEXT: [[ADD_1:%.*]] = fadd float [[L_A_1]], [[L_B_1]] -; VF4-NEXT: [[GEP_RES_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RES]], i64 [[IV]] -; VF4-NEXT: store float [[ADD_0]], ptr [[GEP_RES_0]], align 4 -; VF4-NEXT: [[GEP_RES_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_RES_0]], i64 4 -; VF4-NEXT: store float [[ADD_1]], ptr [[GEP_RES_1]], align 4 -; VF4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; VF4-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 4 -; VF4-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; VF4: [[EXIT]]: ; VF4-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll index a044ae8..d290f2d 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll @@ -27,21 +27,6 @@ define void @load_store_interleave_group(ptr noalias %data) { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MUL_2:%.*]] = shl nsw i64 [[IV]], 1 -; CHECK-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[MUL_2]] -; CHECK-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], 
align 8 -; CHECK-NEXT: store i64 [[L_0]], ptr [[DATA_0]], align 8 -; CHECK-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[MUL_2]], 1 -; CHECK-NEXT: [[DATA_1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[ADD_1]] -; CHECK-NEXT: [[L_1:%.*]] = load i64, ptr [[DATA_1]], align 8 -; CHECK-NEXT: store i64 [[L_1]], ptr [[DATA_1]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -105,25 +90,6 @@ define void @test_2xi64_with_wide_load(ptr noalias %data, ptr noalias %factor) { ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[IV]] -; CHECK-NEXT: [[L_FACTOR:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[TMP13:%.*]] = shl nsw i64 [[IV]], 1 -; CHECK-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP13]] -; CHECK-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8 -; CHECK-NEXT: [[MUL_0:%.*]] = mul i64 [[L_FACTOR]], [[L_0]] -; CHECK-NEXT: store i64 [[MUL_0]], ptr [[DATA_0]], align 8 -; CHECK-NEXT: [[TMP14:%.*]] = or disjoint i64 [[TMP13]], 1 -; CHECK-NEXT: [[DATA_1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP14]] -; CHECK-NEXT: [[L_1:%.*]] = load i64, ptr [[DATA_1]], align 8 -; CHECK-NEXT: [[MUL_1:%.*]] = mul i64 [[L_FACTOR]], [[L_1]] -; CHECK-NEXT: store i64 [[MUL_1]], ptr [[DATA_1]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; 
CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-insertelt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-insertelt.ll index edb9519..187edb5 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-insertelt.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-insertelt.ll @@ -49,23 +49,6 @@ define void @test0(ptr noalias %M3, ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_INC1286_LOOPEXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[IF_THEN1165_US:%.*]] -; CHECK: if.then1165.us: -; CHECK-NEXT: [[INDVARS_IV1783:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT1784:%.*]], [[IF_THEN1165_US]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[INDVARS_IV1783]] -; CHECK-NEXT: [[L_A:%.*]] = load i16, ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[CONV1177_US:%.*]] = zext i16 [[L_A]] to i32 -; CHECK-NEXT: [[ADD1178_US:%.*]] = add nsw i32 [[CONV1177_US]], 10 -; CHECK-NEXT: [[CONV1179_US:%.*]] = trunc i32 [[ADD1178_US]] to i16 -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV1783]] -; CHECK-NEXT: [[L_B:%.*]] = load i64, ptr [[GEP_B]], align 8 -; CHECK-NEXT: [[IDXPROM1181_US:%.*]] = ashr exact i64 [[L_B]], 32 -; CHECK-NEXT: [[ARRAYIDX1185_US:%.*]] = getelementptr inbounds i16, ptr [[M3]], i64 [[IDXPROM1181_US]] -; CHECK-NEXT: store i16 [[CONV1179_US]], ptr [[ARRAYIDX1185_US]], align 2 -; CHECK-NEXT: [[INDVARS_IV_NEXT1784]] = add nuw nsw i64 [[INDVARS_IV1783]], 1 -; CHECK-NEXT: [[EXITCOND1785:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT1784]], 16 -; CHECK-NEXT: br i1 [[EXITCOND1785]], label [[FOR_INC1286_LOOPEXIT]], label [[IF_THEN1165_US]] ; CHECK: for.inc1286.loopexit: ; CHECK-NEXT: ret void ; @@ -141,24 +124,6 @@ define void @test1(ptr noalias %M3, 
ptr noalias %A, ptr noalias %B, ptr noalias ; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_INC1286_LOOPEXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[IF_THEN1165_US:%.*]] -; CHECK: if.then1165.us: -; CHECK-NEXT: [[INDVARS_IV1783:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT1784:%.*]], [[IF_THEN1165_US]] ] -; CHECK-NEXT: [[FPTR:%.*]] = load i32, ptr [[C]], align 4 -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[INDVARS_IV1783]] -; CHECK-NEXT: [[L_A:%.*]] = load i16, ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[CONV1177_US:%.*]] = zext i16 [[L_A]] to i32 -; CHECK-NEXT: [[ADD1178_US:%.*]] = add nsw i32 [[CONV1177_US]], [[FPTR]] -; CHECK-NEXT: [[CONV1179_US:%.*]] = trunc i32 [[ADD1178_US]] to i16 -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV1783]] -; CHECK-NEXT: [[L_B:%.*]] = load i64, ptr [[GEP_B]], align 8 -; CHECK-NEXT: [[IDXPROM1181_US:%.*]] = ashr exact i64 [[L_B]], 32 -; CHECK-NEXT: [[ARRAYIDX1185_US:%.*]] = getelementptr inbounds i16, ptr [[M3]], i64 [[IDXPROM1181_US]] -; CHECK-NEXT: store i16 [[CONV1179_US]], ptr [[ARRAYIDX1185_US]], align 2 -; CHECK-NEXT: [[INDVARS_IV_NEXT1784]] = add nuw nsw i64 [[INDVARS_IV1783]], 1 -; CHECK-NEXT: [[EXITCOND1785:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT1784]], 16 -; CHECK-NEXT: br i1 [[EXITCOND1785]], label [[FOR_INC1286_LOOPEXIT]], label [[IF_THEN1165_US]] ; CHECK: for.inc1286.loopexit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/widen-call-with-intrinsic-or-libfunc.ll b/llvm/test/Transforms/LoopVectorize/AArch64/widen-call-with-intrinsic-or-libfunc.ll index c8eecd7..96a25a8 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/widen-call-with-intrinsic-or-libfunc.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/widen-call-with-intrinsic-or-libfunc.ll @@ -127,7 +127,8 @@ define void 
@test(ptr noalias %src, ptr noalias %dst) { ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll b/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll index d5d0c14..bc9cf4f 100644 --- a/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll +++ b/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll @@ -23,11 +23,7 @@ define half @vectorize_v2f16_loop(ptr addrspace(1) noalias %s) { ; GFX9-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; GFX9-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; GFX9: middle.block: -; GFX9-NEXT: br label [[FOR_END:%.*]] -; GFX9: scalar.ph: ; GFX9-NEXT: br label [[FOR_BODY:%.*]] -; GFX9: for.body: -; GFX9-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; GFX9: for.end: ; GFX9-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP3]], [[TMP2]] ; GFX9-NEXT: [[ADD_LCSSA:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> [[BIN_RDX]]) @@ -52,11 +48,7 @@ define half @vectorize_v2f16_loop(ptr addrspace(1) noalias %s) { ; VI-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; VI-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VI: middle.block: -; VI-NEXT: br label [[FOR_END:%.*]] -; VI: scalar.ph: ; VI-NEXT: br label [[FOR_BODY:%.*]] -; VI: for.body: -; VI-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; VI: for.end: ; VI-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP3]], [[TMP2]] ; VI-NEXT: [[ADD_LCSSA:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> [[BIN_RDX]]) diff --git a/llvm/test/Transforms/LoopVectorize/ARM/active-lane-mask.ll 
b/llvm/test/Transforms/LoopVectorize/ARM/active-lane-mask.ll index e83ac2e..58a24ee 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/active-lane-mask.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/active-lane-mask.ll @@ -36,18 +36,6 @@ define void @f0(ptr noalias %dst, ptr readonly %src, i64 %n) #0 { ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END_LOOPEXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[TMP10]], 3 -; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store i8 [[MUL]], ptr [[ARRAYIDX3]], align 1 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[FOR_END_LOOPEXIT]]: ; CHECK-NEXT: br label %[[FOR_END]] ; CHECK: [[FOR_END]]: @@ -81,7 +69,4 @@ attributes #0 = { nofree norecurse nounwind "target-features"="+armv8.1-m.main,+ ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]], [[META5:![0-9]+]]} -; CHECK: [[META4]] = !{!"llvm.loop.vectorize.width", i32 16} -; CHECK: [[META5]] = !{!"llvm.loop.interleave.count", i32 2} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll index e52d85c..9a76019 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll @@ -25,21 +25,7 @@ define void @test_stride1_4i32(ptr readonly %data, ptr noalias nocapture %dst, i ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 1 -; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2 -; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]] -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4 -; CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 5, [[TMP8]] -; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[I_023]] -; CHECK-NEXT: store i32 [[ADD7]], ptr [[ARRAYIDX9]], align 4 -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_023]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[END]], label [[FOR_BODY]] ; CHECK: end: ; CHECK-NEXT: ret void ; @@ -212,21 +198,7 @@ define void @test_stride3_4i32(ptr readonly %data, ptr noalias nocapture %dst, i ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: 
[[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 3 -; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2 -; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]] -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4 -; CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 5, [[TMP8]] -; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[I_023]] -; CHECK-NEXT: store i32 [[ADD7]], ptr [[ARRAYIDX9]], align 4 -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_023]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[END]], label [[FOR_BODY]] ; CHECK: end: ; CHECK-NEXT: ret void ; @@ -273,21 +245,7 @@ define void @test_stride4_4i32(ptr readonly %data, ptr noalias nocapture %dst, i ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 4 -; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2 -; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]] -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4 -; CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 5, [[TMP8]] -; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[I_023]] -; CHECK-NEXT: store i32 [[ADD7]], ptr [[ARRAYIDX9]], align 4 -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_023]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[END]], label [[FOR_BODY]] ; CHECK: end: ; 
CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll index 4cdfcf2..0a4ed7f 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll @@ -22,11 +22,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP2]] ; @@ -75,11 +71,7 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP8]] ; @@ -126,11 +118,7 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[PROD_0_LCSSA:%.*]] = call i32 
@llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: ret i32 [[PROD_0_LCSSA]] @@ -177,11 +165,7 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] @@ -228,11 +212,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] @@ -279,11 +259,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] @@ -330,11 +306,7 @@ define float 
@reduction_fadd(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP4]]) ; CHECK-NEXT: ret float [[RESULT_0_LCSSA]] @@ -381,11 +353,7 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP4]]) ; CHECK-NEXT: ret float [[RESULT_0_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll index fc79227..029d8bd 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll @@ -34,28 +34,11 @@ define i32 @mla_i32(ptr noalias nocapture readonly %A, ptr noalias nocapture rea ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label 
[[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011]] -; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP12]] to i32 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[I_011]] -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CONV2:%.*]] = sext i8 [[TMP13]] to i32 -; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]] -; CHECK-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[RES_010]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_011]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP10]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[RES_0_LCSSA]] ; entry: @@ -112,28 +95,11 @@ define i32 @mla_i8(ptr noalias nocapture readonly %A, ptr noalias nocapture read ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: 
[[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011]] -; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP12]] to i32 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[I_011]] -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CONV2:%.*]] = sext i8 [[TMP13]] to i32 -; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]] -; CHECK-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[RES_010]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_011]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP10]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[RES_0_LCSSA]] ; entry: @@ -183,25 +149,13 @@ define i32 @add_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 
[ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP7]], [[R_07]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP5]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[R_0_LCSSA]] ; entry: @@ -245,26 +199,14 @@ define i32 @mul_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP3]], <4 x i32> [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP4]]) -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 1, [[SCALAR_PH]] ] -; CHECK-NEXT: 
[[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = mul nsw i32 [[TMP7]], [[R_07]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[R_0_LCSSA]] ; entry: @@ -308,26 +250,14 @@ define i32 @and_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP3]], <4 x i32> [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP4]]) -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ -1, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] 
= and i32 [[TMP7]], [[R_07]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ -1, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ -1, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[R_0_LCSSA]] ; entry: @@ -371,26 +301,14 @@ define i32 @or_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP3]], <4 x i32> [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP4]]) -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = or i32 [[TMP7]], [[R_07]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 
[[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[R_0_LCSSA]] ; entry: @@ -434,26 +352,14 @@ define i32 @xor_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP3]], <4 x i32> [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP4]]) -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = xor i32 [[TMP7]], [[R_07]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], 
[[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[R_0_LCSSA]] ; entry: @@ -497,26 +403,14 @@ define float @fadd_f32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select fast <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> [[TMP3]], <4 x float> [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP4]]) -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = fadd fast float [[TMP7]], [[R_07]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; 
CHECK: for.cond.cleanup: -; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ] ; CHECK-NEXT: ret float [[R_0_LCSSA]] ; entry: @@ -560,26 +454,14 @@ define float @fmul_f32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select fast <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> [[TMP3]], <4 x float> [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP4]]) -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ 1.000000e+00, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = fmul fast float [[TMP7]], [[R_07]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi 
float [ 1.000000e+00, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi float [ 1.000000e+00, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ] ; CHECK-NEXT: ret float [[R_0_LCSSA]] ; entry: @@ -622,7 +504,7 @@ define i32 @smin_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[TMP3]], <4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] @@ -640,7 +522,7 @@ define i32 @smin_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[ADD]] = select i1 [[C]], i32 [[R_07]], i32 [[TMP7]] ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: for.cond.cleanup.loopexit: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] @@ -689,7 +571,7 @@ define i32 @smax_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[TMP3]], <4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] @@ -707,7 +589,7 @@ define i32 @smax_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[ADD]] = select i1 [[C]], i32 [[R_07]], i32 [[TMP7]] ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: for.cond.cleanup.loopexit: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] @@ -756,7 +638,7 @@ define i32 @umin_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[TMP3]], <4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] @@ -774,7 +656,7 @@ define i32 @umin_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[ADD]] = select i1 [[C]], i32 [[R_07]], i32 [[TMP7]] ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 
[[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: for.cond.cleanup.loopexit: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] @@ -823,7 +705,7 @@ define i32 @umax_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[TMP3]], <4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] @@ -841,7 +723,7 @@ define i32 @umax_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[ADD]] = select i1 [[C]], i32 [[R_07]], i32 [[TMP7]] ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: for.cond.cleanup.loopexit: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] diff --git a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll index 3426fb1..6ea075f 100644 --- 
a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll @@ -30,17 +30,6 @@ define void @always_vectorize(ptr %p, i32 %x) { ; DEFAULT-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]] -; DEFAULT-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -59,17 +48,6 @@ define void @always_vectorize(ptr %p, i32 %x) { ; OPTSIZE-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; OPTSIZE: [[MIDDLE_BLOCK]]: ; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; OPTSIZE: [[SCALAR_PH:.*]]: -; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] -; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]] -; OPTSIZE-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4 -; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label 
%[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; OPTSIZE: [[FOR_COND_CLEANUP]]: ; OPTSIZE-NEXT: ret void ; @@ -88,17 +66,6 @@ define void @always_vectorize(ptr %p, i32 %x) { ; MINSIZE-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; MINSIZE: [[MIDDLE_BLOCK]]: ; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MINSIZE: [[SCALAR_PH:.*]]: -; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] -; MINSIZE: [[FOR_BODY]]: -; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]] -; MINSIZE-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4 -; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; MINSIZE: [[FOR_COND_CLEANUP]]: ; MINSIZE-NEXT: ret void ; @@ -386,23 +353,6 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n) ; DEFAULT-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[TMP72:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i8 -; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP72]] -; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP72]], 1 -; DEFAULT-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]] -; DEFAULT-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]] -; DEFAULT-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP72]], 2 -; DEFAULT-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]] -; 
DEFAULT-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDVARS_IV]] -; DEFAULT-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -502,23 +452,6 @@ define void @dont_vectorize_with_minsize() { ; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; DEFAULT-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; DEFAULT-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; DEFAULT-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label 
%[[FOR_BODY]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -545,23 +478,6 @@ define void @dont_vectorize_with_minsize() { ; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; OPTSIZE: [[MIDDLE_BLOCK]]: ; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; OPTSIZE: [[SCALAR_PH:.*]]: -; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] -; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; OPTSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; OPTSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; OPTSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; OPTSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; OPTSIZE: [[FOR_COND_CLEANUP]]: ; OPTSIZE-NEXT: ret void ; @@ -588,23 +504,6 @@ define void @dont_vectorize_with_minsize() { ; MINSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; MINSIZE: [[MIDDLE_BLOCK]]: ; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MINSIZE: [[SCALAR_PH:.*]]: -; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] -; MINSIZE: [[FOR_BODY]]: -; 
MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; MINSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; MINSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; MINSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; MINSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; MINSIZE: [[FOR_COND_CLEANUP]]: ; MINSIZE-NEXT: ret void ; @@ -659,23 +558,6 @@ define void @vectorization_forced() { ; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[CVAL:%.*]] = load i32, ptr 
[[ARRAYIDX2]], align 4 -; DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; DEFAULT-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; DEFAULT-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; DEFAULT-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -702,23 +584,6 @@ define void @vectorization_forced() { ; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; OPTSIZE: [[MIDDLE_BLOCK]]: ; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; OPTSIZE: [[SCALAR_PH:.*]]: -; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] -; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; OPTSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; OPTSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; OPTSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; OPTSIZE-NEXT: store i16 [[ADD]], ptr 
[[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; OPTSIZE: [[FOR_COND_CLEANUP]]: ; OPTSIZE-NEXT: ret void ; @@ -745,23 +610,6 @@ define void @vectorization_forced() { ; MINSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; MINSIZE: [[MIDDLE_BLOCK]]: ; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MINSIZE: [[SCALAR_PH:.*]]: -; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] -; MINSIZE: [[FOR_BODY]]: -; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; MINSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; MINSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; MINSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; MINSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; MINSIZE: [[FOR_COND_CLEANUP]]: ; MINSIZE-NEXT: ret void ; diff --git 
a/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-loop-hint.ll b/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-loop-hint.ll index 625f7a6..1ae71c8 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-loop-hint.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-loop-hint.ll @@ -52,7 +52,7 @@ define dso_local void @predicate_loop_hint(ptr noalias nocapture %A, ptr noalias ; CHECK: %index.next = add nuw i64 %index, 4 ; CHECK: br i1 %{{.*}}, label %{{.*}}, label %vector.body, !llvm.loop [[VEC_LOOP2:![0-9]+]] ; -; CHECK: br i1 %{{.*}}, label %{{.*}}, label %for.body, !llvm.loop [[SCALAR_LOOP2:![0-9]+]] +; CHECK-NOT: br i1 %{{.*}}, label %{{.*}}, label %for.body, !llvm.loop entry: br label %for.body @@ -78,9 +78,6 @@ for.body: ; CHECK-NEXT: [[MD_RT_UNROLL_DIS]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK-NEXT: [[SCALAR_LOOP1]] = distinct !{[[SCALAR_LOOP1]], [[MD_RT_UNROLL_DIS]], [[MD_IS_VEC]]} ; CHECK-NEXT: [[VEC_LOOP2]] = distinct !{[[VEC_LOOP2]], [[MD_IS_VEC]], [[MD_RT_UNROLL_DIS]]} -; CHECK-NEXT: [[SCALAR_LOOP2]] = distinct !{[[SCALAR_LOOP2]], [[ORIG_PRED_ENABLED:!.+]], [[ORIG_VEC_ENABLED:!.+]]} -; CHECK-NEXT: [[ORIG_PRED_ENABLED]] = !{!"llvm.loop.vectorize.predicate.enable", i1 true} -; CHECK-NEXT: [[ORIG_VEC_ENABLED]] = !{!"llvm.loop.vectorize.enable", i1 true} !6 = distinct !{!6, !7, !8} !7 = !{!"llvm.loop.vectorize.predicate.enable", i1 true} diff --git a/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll b/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll index 0b13343..7afa8ce9 100644 --- a/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll +++ b/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll @@ -33,18 +33,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v) { ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label 
[[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll index a7f0206..024194d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll @@ -46,19 +46,6 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) { ; ZVFBFMIN-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; ZVFBFMIN: [[MIDDLE_BLOCK]]: ; ZVFBFMIN-NEXT: br label %[[EXIT:.*]] -; ZVFBFMIN: [[SCALAR_PH:.*]]: -; ZVFBFMIN-NEXT: br label %[[LOOP:.*]] -; ZVFBFMIN: [[LOOP]]: -; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] -; ZVFBFMIN-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]] -; ZVFBFMIN-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]] -; ZVFBFMIN-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP]], align 2 -; ZVFBFMIN-NEXT: [[Y:%.*]] = load bfloat, ptr [[B_GEP]], align 2 -; ZVFBFMIN-NEXT: [[Z:%.*]] = fadd bfloat [[X]], [[Y]] -; ZVFBFMIN-NEXT: store bfloat [[Z]], ptr [[A_GEP]], align 2 -; ZVFBFMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1 -; ZVFBFMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]] -; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; ZVFBFMIN: 
[[EXIT]]: ; ZVFBFMIN-NEXT: ret void ; @@ -155,23 +142,6 @@ define void @vfwmaccbf16.vv(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 ; ZVFBFMIN-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; ZVFBFMIN: [[MIDDLE_BLOCK]]: ; ZVFBFMIN-NEXT: br label %[[EXIT:.*]] -; ZVFBFMIN: [[SCALAR_PH:.*]]: -; ZVFBFMIN-NEXT: br label %[[LOOP:.*]] -; ZVFBFMIN: [[LOOP]]: -; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] -; ZVFBFMIN-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]] -; ZVFBFMIN-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]] -; ZVFBFMIN-NEXT: [[C_GEP:%.*]] = getelementptr float, ptr [[C]], i64 [[I]] -; ZVFBFMIN-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP]], align 2 -; ZVFBFMIN-NEXT: [[Y:%.*]] = load bfloat, ptr [[B_GEP]], align 2 -; ZVFBFMIN-NEXT: [[Z:%.*]] = load float, ptr [[C_GEP]], align 4 -; ZVFBFMIN-NEXT: [[X_EXT:%.*]] = fpext bfloat [[X]] to float -; ZVFBFMIN-NEXT: [[Y_EXT:%.*]] = fpext bfloat [[Y]] to float -; ZVFBFMIN-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[X_EXT]], float [[Y_EXT]], float [[Z]]) -; ZVFBFMIN-NEXT: store float [[FMULADD]], ptr [[C_GEP]], align 4 -; ZVFBFMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1 -; ZVFBFMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]] -; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; ZVFBFMIN: [[EXIT]]: ; ZVFBFMIN-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll index 612e7c0..2087218 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll @@ -33,24 +33,6 @@ define void @block_with_dead_inst_1(ptr %src, i64 %N) #0 { ; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop 
[[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i16 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[DEAD_GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 -; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[TMP25]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -106,24 +88,6 @@ define void @block_with_dead_inst_2(ptr %src) #0 { ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i16 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: [[DEAD_GEP:%.*]] = 
getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -179,27 +143,6 @@ define void @multiple_blocks_with_dead_insts_3(ptr %src) #0 { ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i16 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -262,29 +205,6 @@ define void @multiple_blocks_with_dead_insts_4(ptr %src, i64 %N) #0 { ; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop 
[[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i16 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]] -; CHECK: [[THEN]]: -; CHECK-NEXT: br label %[[THEN_1:.*]] -; CHECK: [[THEN_1]]: -; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -347,31 +267,6 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_5(ptr %src) #0 { ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i16 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: 
[[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]] -; CHECK: [[THEN]]: -; CHECK-NEXT: br label %[[THEN_1:.*]] -; CHECK: [[THEN_1]]: -; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[ELSE_2:.*]] -; CHECK: [[ELSE_2]]: -; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -450,31 +345,6 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 % ; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i16 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]] -; CHECK: [[THEN]]: -; CHECK-NEXT: br i1 [[IC]], label %[[THEN_1:.*]], label %[[ELSE]] -; CHECK: [[THEN_1]]: -; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label 
%[[ELSE_2:.*]] -; CHECK: [[ELSE_2]]: -; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -537,24 +407,6 @@ define void @empty_block_with_phi_1(ptr %src, i64 %N) #0 { ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i32 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]] -; CHECK: [[THEN]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[P:%.*]] = phi i16 [ [[L]], %[[LOOP_HEADER]] ], [ 99, %[[THEN]] ] -; CHECK-NEXT: store i16 [[P]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[TMP17]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -607,24 +459,6 @@ define void @empty_block_with_phi_2(ptr %src, i64 %N) #0 { ; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label 
%[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i32 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[P:%.*]] = phi i16 [ [[L]], %[[LOOP_HEADER]] ], [ 99, %[[ELSE]] ] -; CHECK-NEXT: store i16 [[P]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[TMP18]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll index 96c3a0d..10f8f74 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll @@ -42,16 +42,6 @@ define void @dead_load(ptr %p, i16 %start) { ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[START_EXT]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 0, ptr [[GEP]], align 2 -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3 -; CHECK-NEXT: 
[[CMP:%.*]] = icmp slt i64 [[IV]], 111 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -326,21 +316,6 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) { ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 false, label %[[LOOP_LATCH]], label %[[THEN:.*]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[NOT_A:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[P:%.*]] = phi i32 [ [[NOT_A]], %[[THEN]] ], [ 0, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i32 [[P]], ptr [[GEP]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 9 -; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV]], 322 -; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -408,21 +383,6 @@ define void @gather_interleave_group_with_dead_insert_pos(i64 %N, ptr noalias %s ; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L_DEAD:%.*]] = load i8, ptr [[GEP_SRC_0]], align 1 -; CHECK-NEXT: [[IV_1:%.*]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV_1]] -; CHECK-NEXT: [[L_1:%.*]] = load i8, ptr 
[[GEP_SRC_1]], align 1 -; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[L_1]] to i32 -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr i32, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i32 [[EXT]], ptr [[GEP_DST]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 2 -; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll b/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll index b6230dc..3fd90b2 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll @@ -32,18 +32,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -86,21 +75,9 @@ define i64 @vector_add_reduce(ptr noalias nocapture %a) { ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP9]]) -; 
CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]] -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: -; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i64 [[TMP11]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll index d20dd05..01b4502 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll @@ -29,18 +29,7 @@ define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] 
= add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -61,18 +50,7 @@ define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]] -; FIXED-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -113,20 +91,9 @@ define void @vector_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = 
getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -147,18 +114,7 @@ define void @vector_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]] -; FIXED-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -199,20 +155,9 @@ define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: 
middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[DIVREM:%.*]] = urem i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -233,18 +178,7 @@ define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[DIVREM:%.*]] = urem i64 [[ELEM]], [[V]] -; FIXED-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -285,20 +219,9 @@ define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; CHECK-NEXT: [[TMP6:%.*]] = 
icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[DIVREM:%.*]] = srem i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -319,18 +242,7 @@ define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[DIVREM:%.*]] = srem i64 [[ELEM]], [[V]] -; FIXED-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; FIXED: for.end: ; 
FIXED-NEXT: ret void ; @@ -379,26 +291,9 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0 -; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; CHECK: do_op: -; CHECK-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]] -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -422,24 +317,7 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: -; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; 
FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0 -; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; FIXED: do_op: -; FIXED-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]] -; FIXED-NEXT: br label [[LATCH]] -; FIXED: latch: -; FIXED-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; FIXED-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; FIXED-NEXT: br label [[LATCH:%.*]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -494,26 +372,9 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0 -; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; CHECK: do_op: -; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]] -; 
CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -537,24 +398,7 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: -; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0 -; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; FIXED: do_op: -; FIXED-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]] -; FIXED-NEXT: br label [[LATCH]] -; FIXED: latch: -; FIXED-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; FIXED-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; FIXED-NEXT: br label [[LATCH:%.*]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -601,26 +445,9 @@ define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP12]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = 
sub nuw i64 [[AVL]], [[TMP12]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42 -; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; CHECK: do_op: -; CHECK-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], 27 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -641,24 +468,7 @@ define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) { ; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: -; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] 
= load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42 -; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; FIXED: do_op: -; FIXED-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], 27 -; FIXED-NEXT: br label [[LATCH]] -; FIXED: latch: -; FIXED-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; FIXED-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; FIXED-NEXT: br label [[LATCH:%.*]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -705,26 +515,9 @@ define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP12]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42 -; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; CHECK: do_op: -; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], 27 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; 
CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -745,24 +538,7 @@ define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) { ; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: -; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42 -; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; FIXED: do_op: -; FIXED-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], 27 -; FIXED-NEXT: br label [[LATCH]] -; FIXED: latch: -; FIXED-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; FIXED-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; FIXED-NEXT: br label [[LATCH:%.*]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -815,26 +591,9 @@ define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[ELEM]], -128 -; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; CHECK: do_op: -; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i8 [[ELEM]], -1 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[PHI:%.*]] = phi i8 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; CHECK-NEXT: store i8 [[PHI]], ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -856,24 +615,7 @@ define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) { ; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: -; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; FIXED-NEXT: [[C:%.*]] = icmp ne i8 [[ELEM]], -128 -; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; FIXED: do_op: -; 
FIXED-NEXT: [[DIVREM:%.*]] = sdiv i8 [[ELEM]], -1 -; FIXED-NEXT: br label [[LATCH]] -; FIXED: latch: -; FIXED-NEXT: [[PHI:%.*]] = phi i8 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; FIXED-NEXT: store i8 [[PHI]], ptr [[ARRAYIDX]], align 1 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; FIXED-NEXT: br label [[LATCH:%.*]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -945,7 +687,7 @@ define i32 @udiv_sdiv_with_invariant_divisors(i8 %x, i16 %y, i1 %c) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i8> [[VEC_IND]], [[BROADCAST_SPLAT7]] ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP16:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-NEXT: [[TMP17:%.*]] = mul nuw i32 [[TMP16]], 2 @@ -972,7 +714,7 @@ define i32 @udiv_sdiv_with_invariant_divisors(i8 %x, i16 %y, i1 %c) { ; CHECK-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 0 ; CHECK-NEXT: [[IV_NEXT_TRUNC]] = trunc i16 [[IV_NEXT]] to i8 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: [[MERGE_LCSSA:%.*]] = phi i32 [ [[MERGE]], [[LOOP_LATCH]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[MERGE_LCSSA]] @@ -1004,28 +746,9 @@ define i32 @udiv_sdiv_with_invariant_divisors(i8 %x, i16 %y, i1 %c) { ; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP11:![0-9]+]] ; FIXED: middle.block: ; FIXED-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[PREDPHI]], i32 3 -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: -; FIXED-NEXT: br label [[LOOP_HEADER:%.*]] -; FIXED: loop.header: -; FIXED-NEXT: [[IV:%.*]] = phi i16 [ -12, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; FIXED-NEXT: [[NARROW_IV:%.*]] = phi i8 [ -12, [[SCALAR_PH]] ], [ [[IV_NEXT_TRUNC:%.*]], [[LOOP_LATCH]] ] -; FIXED-NEXT: br i1 [[C]], label [[LOOP_LATCH]], label [[THEN:%.*]] -; FIXED: then: -; FIXED-NEXT: [[UD:%.*]] = udiv i8 [[NARROW_IV]], [[X]] -; FIXED-NEXT: [[UD_EXT:%.*]] = zext i8 [[UD]] to i16 -; FIXED-NEXT: [[SD:%.*]] = sdiv i16 [[UD_EXT]], [[Y]] -; FIXED-NEXT: [[SD_EXT:%.*]] = sext i16 [[SD]] to i32 -; FIXED-NEXT: br label [[LOOP_LATCH]] -; FIXED: loop.latch: -; FIXED-NEXT: [[MERGE:%.*]] = phi i32 [ 0, [[LOOP_HEADER]] ], [ [[SD_EXT]], [[THEN]] ] -; FIXED-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], 1 -; FIXED-NEXT: [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; FIXED-NEXT: [[IV_NEXT_TRUNC]] = trunc i16 [[IV_NEXT]] to i8 -; FIXED-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]] +; FIXED-NEXT: br label [[LOOP_LATCH:%.*]] ; FIXED: exit: -; FIXED-NEXT: [[MERGE_LCSSA:%.*]] = phi i32 [ [[MERGE]], [[LOOP_LATCH]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] -; FIXED-NEXT: ret i32 [[MERGE_LCSSA]] +; FIXED-NEXT: ret i32 [[TMP7]] ; entry: br label %loop.header diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll index 0a60556..21272cb 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll @@ -30,16 +30,7 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) { ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: 
middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY1:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]] -; CHECK-NEXT: store i64 [[IV1]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY1]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; @@ -84,18 +75,7 @@ define void @test_wide_ptr_induction(ptr noalias %a, ptr noalias %b, i64 %N) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[B]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[ADDR]], i64 8 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store ptr [[ADDR]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll index a2ab7c4..143a51d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll +++ 
b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll @@ -46,19 +46,6 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) { ; ZVFHMIN-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; ZVFHMIN: [[MIDDLE_BLOCK]]: ; ZVFHMIN-NEXT: br label %[[EXIT:.*]] -; ZVFHMIN: [[SCALAR_PH:.*]]: -; ZVFHMIN-NEXT: br label %[[LOOP:.*]] -; ZVFHMIN: [[LOOP]]: -; ZVFHMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] -; ZVFHMIN-NEXT: [[A_GEP:%.*]] = getelementptr half, ptr [[A]], i64 [[I]] -; ZVFHMIN-NEXT: [[B_GEP:%.*]] = getelementptr half, ptr [[B]], i64 [[I]] -; ZVFHMIN-NEXT: [[X:%.*]] = load half, ptr [[A_GEP]], align 2 -; ZVFHMIN-NEXT: [[Y:%.*]] = load half, ptr [[B_GEP]], align 2 -; ZVFHMIN-NEXT: [[Z:%.*]] = fadd half [[X]], [[Y]] -; ZVFHMIN-NEXT: store half [[Z]], ptr [[A_GEP]], align 2 -; ZVFHMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1 -; ZVFHMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]] -; ZVFHMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; ZVFHMIN: [[EXIT]]: ; ZVFHMIN-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll index 5df4f70..1c6954c 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll @@ -116,17 +116,7 @@ define void @predicated_strided_store(ptr %start) { ; RVA23-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RVA23-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; RVA23: middle.block: -; RVA23-NEXT: br label [[EXIT:%.*]] -; RVA23: scalar.ph: ; RVA23-NEXT: br label [[LOOP:%.*]] -; RVA23: loop: -; RVA23-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; RVA23-NEXT: [[TMP8:%.*]] = mul i64 [[IV]], 7 -; RVA23-NEXT: [[ADD_PTR:%.*]] = getelementptr i8, ptr [[START]], i64 
[[TMP8]] -; RVA23-NEXT: store i8 0, ptr [[ADD_PTR]], align 1 -; RVA23-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; RVA23-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 585 -; RVA23-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]] ; RVA23: exit: ; RVA23-NEXT: ret void ; @@ -153,17 +143,7 @@ define void @predicated_strided_store(ptr %start) { ; RVA23ZVL1024B-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RVA23ZVL1024B-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; RVA23ZVL1024B: middle.block: -; RVA23ZVL1024B-NEXT: br label [[EXIT:%.*]] -; RVA23ZVL1024B: scalar.ph: ; RVA23ZVL1024B-NEXT: br label [[LOOP:%.*]] -; RVA23ZVL1024B: loop: -; RVA23ZVL1024B-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; RVA23ZVL1024B-NEXT: [[TMP8:%.*]] = mul i64 [[IV]], 7 -; RVA23ZVL1024B-NEXT: [[ADD_PTR:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP8]] -; RVA23ZVL1024B-NEXT: store i8 0, ptr [[ADD_PTR]], align 1 -; RVA23ZVL1024B-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; RVA23ZVL1024B-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 585 -; RVA23ZVL1024B-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]] ; RVA23ZVL1024B: exit: ; RVA23ZVL1024B-NEXT: ret void ; @@ -216,21 +196,7 @@ define void @store_to_addr_generated_from_invariant_addr(ptr noalias %p0, ptr no ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr i32, ptr [[P1]], i64 [[IV]] -; CHECK-NEXT: store ptr [[P0]], ptr [[ARRAYIDX11]], align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr [[P2]], align 4 -; CHECK-NEXT: [[BITS_TO_GO:%.*]] = 
getelementptr i8, ptr [[P3]], i64 [[TMP10]] -; CHECK-NEXT: store i32 0, ptr [[BITS_TO_GO]], align 4 -; CHECK-NEXT: store i32 0, ptr [[BITS_TO_GO]], align 4 -; CHECK-NEXT: store i8 0, ptr [[BITS_TO_GO]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll index 4d97a65..4ccec2c 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll @@ -153,21 +153,6 @@ define void @test_3_inductions(ptr noalias %dst, ptr noalias %src, i64 %n) #1 { ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV_0:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[IV_0_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_OR:%.*]] = or i32 [[IV_2]], [[IV_0]] -; CHECK-NEXT: [[IV_OR_EXT:%.*]] = sext i32 [[IV_OR]] to i64 -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV_OR_EXT]] -; CHECK-NEXT: store ptr [[GEP_SRC]], ptr [[DST]], align 8 -; CHECK-NEXT: [[IV_0_NEXT]] = add i32 [[IV_0]], 2 -; CHECK-NEXT: [[IV_1_NEXT]] = add i64 [[IV_1]], 1 -; CHECK-NEXT: [[IV_2_NEXT]] = add i32 [[IV_2]], 2 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_1]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -228,27 +213,6 @@ define void 
@redundant_iv_trunc_for_cse(ptr noalias %src, ptr noalias %dst, i64 ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4 -; CHECK-NEXT: [[C_0:%.*]] = icmp eq i32 [[L]], 0 -; CHECK-NEXT: [[TRUNC_IV:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-NEXT: br i1 [[C_0]], label %[[THEN:.*]], label %[[LOOP_LATCH]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[TRUNC_IV_2:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-NEXT: [[SHL_IV:%.*]] = shl i32 [[TRUNC_IV_2]], 16 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[P:%.*]] = phi i32 [ [[SHL_IV]], %[[THEN]] ], [ [[TRUNC_IV]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[TRUNC_P:%.*]] = trunc i32 [[P]] to i8 -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i8 [[TRUNC_P]], ptr [[GEP_DST]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll index 63d1af38..7e6e45f 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll @@ -133,24 +133,11 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; 
IF-EVL-OUTLOOP: middle.block: ; IF-EVL-OUTLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]]) -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; IF-EVL-OUTLOOP: scalar.ph: ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; IF-EVL-OUTLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_08]] -; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX]], align 2 -; IF-EVL-OUTLOOP-NEXT: [[CONV:%.*]] = sext i16 [[TMP13]] to i32 -; IF-EVL-OUTLOOP-NEXT: [[ADD]] = add nsw i32 [[R_07]], [[CONV]] -; IF-EVL-OUTLOOP-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; IF-EVL-OUTLOOP-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; IF-EVL-OUTLOOP: for.cond.cleanup.loopexit: -; IF-EVL-OUTLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ] ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_COND_CLEANUP]] ; IF-EVL-OUTLOOP: for.cond.cleanup: -; IF-EVL-OUTLOOP-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; IF-EVL-OUTLOOP-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP12]], [[FOR_BODY]] ] ; IF-EVL-OUTLOOP-NEXT: ret i32 [[R_0_LCSSA]] ; ; IF-EVL-INLOOP-LABEL: @add_i16_i32( @@ -176,24 +163,11 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 ; IF-EVL-INLOOP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL-INLOOP: middle.block: -; IF-EVL-INLOOP-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; IF-EVL-INLOOP: scalar.ph: ; 
IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; IF-EVL-INLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_08]] -; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX]], align 2 -; IF-EVL-INLOOP-NEXT: [[CONV:%.*]] = sext i16 [[TMP13]] to i32 -; IF-EVL-INLOOP-NEXT: [[ADD]] = add nsw i32 [[R_07]], [[CONV]] -; IF-EVL-INLOOP-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; IF-EVL-INLOOP-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; IF-EVL-INLOOP: for.cond.cleanup.loopexit: -; IF-EVL-INLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] ; IF-EVL-INLOOP-NEXT: br label [[FOR_COND_CLEANUP]] ; IF-EVL-INLOOP: for.cond.cleanup: -; IF-EVL-INLOOP-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; IF-EVL-INLOOP-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP11]], [[FOR_BODY]] ] ; IF-EVL-INLOOP-NEXT: ret i32 [[R_0_LCSSA]] ; entry: @@ -330,22 +304,9 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL-OUTLOOP: middle.block: ; IF-EVL-OUTLOOP-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP15]]) -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-OUTLOOP: scalar.ph: ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ 
[[SMIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-OUTLOOP-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-OUTLOOP-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP19]], [[RDX]] -; IF-EVL-OUTLOOP-NEXT: [[SMIN]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]] -; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; IF-EVL-OUTLOOP: for.end: -; IF-EVL-OUTLOOP-NEXT: [[SMIN_LCSSA:%.*]] = phi i32 [ [[SMIN]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-OUTLOOP-NEXT: ret i32 [[SMIN_LCSSA]] +; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP18]] ; ; IF-EVL-INLOOP-LABEL: @smin( ; IF-EVL-INLOOP-NEXT: entry: @@ -367,22 +328,9 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-INLOOP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL-INLOOP: middle.block: -; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-INLOOP: scalar.ph: ; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-INLOOP-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-INLOOP-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP16]], [[RDX]] -; IF-EVL-INLOOP-NEXT: [[SMIN]] = select i1 [[CMP_I]], i32 [[TMP16]], i32 [[RDX]] -; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; 
IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; IF-EVL-INLOOP: for.end: -; IF-EVL-INLOOP-NEXT: [[SMIN_LCSSA:%.*]] = phi i32 [ [[SMIN]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ] -; IF-EVL-INLOOP-NEXT: ret i32 [[SMIN_LCSSA]] +; IF-EVL-INLOOP-NEXT: ret i32 [[RDX_MINMAX]] ; ; IF-EVL-LABEL: @smin( ; IF-EVL-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll index 43560d2..31c8b74 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll @@ -31,24 +31,7 @@ define void @load_store_factor2_i32(ptr %p) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; CHECK-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2 -; CHECK-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -73,24 +56,7 @@ define void @load_store_factor2_i32(ptr %p) { ; FIXED-NEXT: 
[[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; FIXED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; FIXED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; FIXED-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2 -; FIXED-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -121,24 +87,7 @@ define void @load_store_factor2_i32(ptr %p) { ; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; SCALABLE-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; SCALABLE-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; 
SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; SCALABLE-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2 -; SCALABLE-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -194,24 +143,7 @@ define void @load_store_factor2_i64(ptr %p) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -236,24 +168,7 @@ define void @load_store_factor2_i64(ptr %p) { ; FIXED-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: 
scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -284,24 +199,7 @@ define void @load_store_factor2_i64(ptr %p) { ; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], 
align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -359,29 +257,7 @@ define void @load_store_factor3_i32(ptr %p) { ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; CHECK-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2 -; CHECK-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4 -; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; CHECK-NEXT: [[Q2:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET2]] -; CHECK-NEXT: [[X2:%.*]] = load i32, ptr [[Q2]], align 4 -; CHECK-NEXT: [[Y2:%.*]] = add i32 [[X2]], 3 -; CHECK-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -410,29 +286,7 @@ define void @load_store_factor3_i32(ptr %p) { ; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; FIXED: 
middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; FIXED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; FIXED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; FIXED-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2 -; FIXED-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4 -; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; FIXED-NEXT: [[Q2:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET2]] -; FIXED-NEXT: [[X2:%.*]] = load i32, ptr [[Q2]], align 4 -; FIXED-NEXT: [[Y2:%.*]] = add i32 [[X2]], 3 -; FIXED-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -465,29 +319,7 @@ define void @load_store_factor3_i32(ptr %p) { ; SCALABLE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; SCALABLE-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; 
SCALABLE-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; SCALABLE-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2 -; SCALABLE-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4 -; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET2]] -; SCALABLE-NEXT: [[X2:%.*]] = load i32, ptr [[Q2]], align 4 -; SCALABLE-NEXT: [[Y2:%.*]] = add i32 [[X2]], 3 -; SCALABLE-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -551,29 +383,7 @@ define void @load_store_factor3_i64(ptr %p) { ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; CHECK-NEXT: [[Q2:%.*]] = getelementptr 
i64, ptr [[P]], i64 [[OFFSET2]] -; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -602,29 +412,7 @@ define void @load_store_factor3_i64(ptr %p) { ; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -657,29 +445,7 @@ define void @load_store_factor3_i64(ptr %p) { ; SCALABLE-NEXT: 
[[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -745,34 +511,7 @@ define void @load_store_factor4(ptr %p) { ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: 
[[OFFSET0:%.*]] = mul i64 [[I]], 4 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -803,34 +542,7 @@ define void @load_store_factor4(ptr %p) { ; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 4 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[Y0:%.*]] = 
add i64 [[X0]], 1 -; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -865,34 +577,7 @@ define void @load_store_factor4(ptr %p) { ; SCALABLE-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 4 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = 
getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; SCALABLE-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -966,39 +651,7 @@ define void @load_store_factor5(ptr %p) { ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[Y1:%.*]] = add i64 
[[X1]], 2 -; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; CHECK-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; CHECK-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; CHECK-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; CHECK-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -1033,39 +686,7 @@ define void @load_store_factor5(ptr %p) { ; FIXED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] 
-; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; FIXED-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; FIXED-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; FIXED-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; FIXED-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; FIXED-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -1102,39 +723,7 @@ define void @load_store_factor5(ptr %p) { ; SCALABLE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; 
SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; SCALABLE-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; SCALABLE-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; SCALABLE-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; SCALABLE-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -1216,44 +805,7 @@ define void @load_store_factor6(ptr %p) { ; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 
[[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; CHECK-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; CHECK-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; CHECK-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; CHECK-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; CHECK-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; CHECK-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; CHECK-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; CHECK-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; CHECK-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -1291,44 +843,7 @@ define void @load_store_factor6(ptr %p) { ; FIXED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; FIXED-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; FIXED-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; FIXED-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; FIXED-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; FIXED-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; FIXED-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; FIXED-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; FIXED-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; FIXED-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; FIXED-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; FIXED-NEXT: 
[[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -1367,44 +882,7 @@ define void @load_store_factor6(ptr %p) { ; SCALABLE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; SCALABLE-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; SCALABLE-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 
[[OFFSET4]] -; SCALABLE-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; SCALABLE-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; SCALABLE-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; SCALABLE-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; SCALABLE-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; SCALABLE-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -1494,49 +972,7 @@ define void @load_store_factor7(ptr %p) { ; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; CHECK-NEXT: store i64 [[Y2]], ptr 
[[Q2]], align 8 -; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; CHECK-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; CHECK-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; CHECK-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; CHECK-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; CHECK-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; CHECK-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; CHECK-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; CHECK-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; CHECK-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; CHECK-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1 -; CHECK-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]] -; CHECK-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8 -; CHECK-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7 -; CHECK-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -1578,49 +1014,7 @@ define void @load_store_factor7(ptr %p) { ; FIXED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr 
[[Q0]], align 8 -; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; FIXED-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; FIXED-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; FIXED-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; FIXED-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; FIXED-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; FIXED-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; FIXED-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; FIXED-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; FIXED-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; FIXED-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; FIXED-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1 -; FIXED-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]] -; FIXED-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8 -; FIXED-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7 -; FIXED-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label 
[[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -1661,49 +1055,7 @@ define void @load_store_factor7(ptr %p) { ; SCALABLE-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; SCALABLE-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; SCALABLE-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; SCALABLE-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; SCALABLE-NEXT: 
store i64 [[Y4]], ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; SCALABLE-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; SCALABLE-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; SCALABLE-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; SCALABLE-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; SCALABLE-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1 -; SCALABLE-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]] -; SCALABLE-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8 -; SCALABLE-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7 -; SCALABLE-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -1801,54 +1153,7 @@ define void @load_store_factor8(ptr %p) { ; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], 
i64 [[OFFSET2]] -; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; CHECK-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; CHECK-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; CHECK-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; CHECK-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; CHECK-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; CHECK-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; CHECK-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; CHECK-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; CHECK-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; CHECK-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1 -; CHECK-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]] -; CHECK-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8 -; CHECK-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7 -; CHECK-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8 -; CHECK-NEXT: [[OFFSET7:%.*]] = add i64 [[OFFSET6]], 1 -; CHECK-NEXT: [[Q7:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET7]] -; CHECK-NEXT: [[X7:%.*]] = load i64, ptr [[Q7]], align 8 -; CHECK-NEXT: [[Y7:%.*]] = add i64 [[X7]], 8 -; CHECK-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -1891,54 +1196,7 @@ define void @load_store_factor8(ptr %p) { ; FIXED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], 
label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; FIXED-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; FIXED-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; FIXED-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; FIXED-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; FIXED-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; FIXED-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; FIXED-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; FIXED-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; FIXED-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; FIXED-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; 
FIXED-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1 -; FIXED-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]] -; FIXED-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8 -; FIXED-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7 -; FIXED-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8 -; FIXED-NEXT: [[OFFSET7:%.*]] = add i64 [[OFFSET6]], 1 -; FIXED-NEXT: [[Q7:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET7]] -; FIXED-NEXT: [[X7:%.*]] = load i64, ptr [[Q7]], align 8 -; FIXED-NEXT: [[Y7:%.*]] = add i64 [[X7]], 8 -; FIXED-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -1981,54 +1239,7 @@ define void @load_store_factor8(ptr %p) { ; SCALABLE-NEXT: [[TMP25:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; 
SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; SCALABLE-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; SCALABLE-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; SCALABLE-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; SCALABLE-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; SCALABLE-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; SCALABLE-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; SCALABLE-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; SCALABLE-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; SCALABLE-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1 -; SCALABLE-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]] -; SCALABLE-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8 -; SCALABLE-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7 -; SCALABLE-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8 -; SCALABLE-NEXT: [[OFFSET7:%.*]] = add i64 [[OFFSET6]], 1 -; SCALABLE-NEXT: [[Q7:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET7]] -; SCALABLE-NEXT: [[X7:%.*]] = load i64, ptr [[Q7]], align 8 -; SCALABLE-NEXT: [[Y7:%.*]] = add i64 [[X7]], 8 -; SCALABLE-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -2118,23 +1329,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) { ; 
CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; CHECK-NEXT: [[RES:%.*]] = add i32 [[X0]], [[X1]] -; CHECK-NEXT: [[DST:%.*]] = getelementptr i32, ptr [[Q]], i64 [[I]] -; CHECK-NEXT: store i32 [[RES]], ptr [[DST]], align 4 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -2157,23 +1352,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) { ; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 
4 -; FIXED-NEXT: [[RES:%.*]] = add i32 [[X0]], [[X1]] -; FIXED-NEXT: [[DST:%.*]] = getelementptr i32, ptr [[Q]], i64 [[I]] -; FIXED-NEXT: store i32 [[RES]], ptr [[DST]], align 4 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -2202,23 +1381,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) { ; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; SCALABLE-NEXT: [[RES:%.*]] = add i32 [[X0]], [[X1]] -; SCALABLE-NEXT: [[DST:%.*]] = getelementptr i32, ptr [[Q]], i64 [[I]] -; SCALABLE-NEXT: store i32 [[RES]], ptr [[DST]], align 4 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -2273,23 +1436,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) { ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label 
[[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[RES:%.*]] = add i64 [[X0]], [[X1]] -; CHECK-NEXT: [[DST:%.*]] = getelementptr i64, ptr [[Q]], i64 [[I]] -; CHECK-NEXT: store i64 [[RES]], ptr [[DST]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -2312,23 +1459,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) { ; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[RES:%.*]] = add i64 [[X0]], [[X1]] -; FIXED-NEXT: [[DST:%.*]] = getelementptr i64, ptr [[Q]], i64 [[I]] -; FIXED-NEXT: store i64 [[RES]], ptr [[DST]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 
[[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -2357,23 +1488,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) { ; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[RES:%.*]] = add i64 [[X0]], [[X1]] -; SCALABLE-NEXT: [[DST:%.*]] = getelementptr i64, ptr [[Q]], i64 [[I]] -; SCALABLE-NEXT: store i64 [[RES]], ptr [[DST]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll index a30aebb..ef0f0cf 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll @@ -96,7 +96,8 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali ; PREDICATED_DATA-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP0:![0-9]+]] ; PREDICATED_DATA: middle.block: ; PREDICATED_DATA-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_DATA: scalar.ph: +; PREDICATED_DATA: for.end: +; PREDICATED_DATA-NEXT: ret void ; ; PREDICATED_DATA-WITH-EVL-LABEL: define void @masked_strided_factor2 ; PREDICATED_DATA-WITH-EVL-SAME: (ptr noalias readonly captures(none) [[P:%.*]], ptr noalias captures(none) [[Q:%.*]], i8 zeroext [[GUARD:%.*]]) #[[ATTR0:[0-9]+]] { @@ -135,9 +136,13 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali ; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr align 1 [[TMP10]], <vscale x 32 x i1> [[INTERLEAVED_MASK4]], i32 [[INTERLEAVE_EVL3]]) ; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP1]], [[EVL_BASED_IV]] ; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP1]] +; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]] +; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP12:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 +; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; PREDICATED_DATA-WITH-EVL: middle.block: ; PREDICATED_DATA-WITH-EVL-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_DATA-WITH-EVL: scalar.ph: +; PREDICATED_DATA-WITH-EVL: for.end: +; PREDICATED_DATA-WITH-EVL-NEXT: ret void ; entry: %conv = zext i8 %guard to i32 @@ -270,10 +275,11 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali ; PREDICATED_DATA-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP1]] ; PREDICATED_DATA-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]] ; PREDICATED_DATA-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; PREDICATED_DATA-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; PREDICATED_DATA-NEXT: br 
i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; PREDICATED_DATA: middle.block: ; PREDICATED_DATA-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_DATA: scalar.ph: +; PREDICATED_DATA: for.end: +; PREDICATED_DATA-NEXT: ret void ; ; PREDICATED_DATA-WITH-EVL-LABEL: define void @masked_strided_factor4 ; PREDICATED_DATA-WITH-EVL-SAME: (ptr noalias readonly captures(none) [[P:%.*]], ptr noalias captures(none) [[Q:%.*]], i8 zeroext [[GUARD:%.*]]) #[[ATTR0]] { @@ -316,9 +322,13 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali ; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr align 1 [[TMP15]], <vscale x 64 x i1> [[INTERLEAVED_MASK4]], i32 [[INTERLEAVE_EVL3]]) ; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP1]], [[EVL_BASED_IV]] ; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP1]] +; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]] +; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP16:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 +; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; PREDICATED_DATA-WITH-EVL: middle.block: ; PREDICATED_DATA-WITH-EVL-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_DATA-WITH-EVL: scalar.ph: +; PREDICATED_DATA-WITH-EVL: for.end: +; PREDICATED_DATA-WITH-EVL-NEXT: ret void ; entry: %conv = zext i8 %guard to i32 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll b/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll index cf2f78b..328ee16 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll @@ -62,18 +62,7 @@ define void @load_store(ptr %p) { ; LMUL2-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; LMUL2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP0:![0-9]+]] ; LMUL2: middle.block: -; LMUL2-NEXT: br label [[FOR_END:%.*]] -; LMUL2: scalar.ph: ; LMUL2-NEXT: br label [[FOR_BODY:%.*]] -; LMUL2: for.body: -; LMUL2-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; LMUL2-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]] -; LMUL2-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8 -; LMUL2-NEXT: [[W:%.*]] = add i64 [[V]], 1 -; LMUL2-NEXT: store i64 [[W]], ptr [[Q]], align 8 -; LMUL2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; LMUL2-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; LMUL2-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; LMUL2: for.end: ; LMUL2-NEXT: ret void ; @@ -96,18 +85,7 @@ define void @load_store(ptr %p) { ; LMUL4-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; LMUL4-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; LMUL4: middle.block: -; LMUL4-NEXT: br label [[FOR_END:%.*]] -; LMUL4: scalar.ph: ; LMUL4-NEXT: br label [[FOR_BODY:%.*]] -; LMUL4: for.body: -; LMUL4-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; LMUL4-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]] -; LMUL4-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8 -; LMUL4-NEXT: [[W:%.*]] = add i64 [[V]], 1 -; LMUL4-NEXT: store i64 [[W]], ptr [[Q]], align 8 -; LMUL4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; LMUL4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; LMUL4-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; LMUL4: for.end: ; LMUL4-NEXT: ret void ; @@ -130,18 +108,7 @@ define void @load_store(ptr %p) { ; LMUL8-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; LMUL8-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; LMUL8: middle.block: -; LMUL8-NEXT: br label [[FOR_END:%.*]] -; LMUL8: scalar.ph: ; LMUL8-NEXT: br 
label [[FOR_BODY:%.*]] -; LMUL8: for.body: -; LMUL8-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; LMUL8-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]] -; LMUL8-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8 -; LMUL8-NEXT: [[W:%.*]] = add i64 [[V]], 1 -; LMUL8-NEXT: store i64 [[W]], ptr [[Q]], align 8 -; LMUL8-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; LMUL8-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; LMUL8-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; LMUL8: for.end: ; LMUL8-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll index 53907fa..8ef53ca 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll @@ -133,21 +133,7 @@ define void @trip8_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: call void @llvm.vp.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP7]], ptr align 1 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]]) ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i64 [[I_08]] -; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP15]], 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i64 [[I_08]] -; CHECK-NEXT: [[TMP16:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP16]] -; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] 
= icmp eq i64 [[INC]], 8 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -186,21 +172,7 @@ define void @trip16_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP11]], ptr align 1 [[TMP4]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP5]]) ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP7]], 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 [[I_08]] -; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP8]] -; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 16 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -240,21 +212,7 @@ define void @trip32_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP11]], ptr align 1 [[TMP4]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]]) ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 
[[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP7]], 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 [[I_08]] -; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP8]] -; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 32 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -293,21 +251,7 @@ define void @trip24_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP7]], ptr align 1 [[DST]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]]) ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[I_08]] -; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP8]], 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[I_08]] -; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP9]] -; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 24 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll 
index ae6c90c..06b47aa 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll @@ -40,25 +40,7 @@ define void @test(ptr noalias nocapture %a, ptr noalias nocapture %b, i32 %v) { ; VLENUNK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; VLENUNK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VLENUNK: middle.block: -; VLENUNK-NEXT: br label [[FOR_END:%.*]] -; VLENUNK: scalar.ph: -; VLENUNK-NEXT: br label [[FOR_BODY:%.*]] -; VLENUNK: for.body: -; VLENUNK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; VLENUNK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[IV]], 512 -; VLENUNK-NEXT: br i1 [[ICMP]], label [[DO_LOAD:%.*]], label [[LATCH]] -; VLENUNK: do_load: -; VLENUNK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; VLENUNK-NEXT: [[ELEM:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; VLENUNK-NEXT: br label [[LATCH]] -; VLENUNK: latch: -; VLENUNK-NEXT: [[PHI:%.*]] = phi i32 [ [[ELEM]], [[DO_LOAD]] ], [ 0, [[FOR_BODY]] ] -; VLENUNK-NEXT: [[ADD:%.*]] = add i32 [[PHI]], [[V]] -; VLENUNK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; VLENUNK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 -; VLENUNK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; VLENUNK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; VLENUNK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; VLENUNK-NEXT: br label [[LATCH:%.*]] ; VLENUNK: for.end: ; VLENUNK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll index e0bd8aa..0a9b1e0 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll @@ -108,7 +108,8 @@ define 
i32 @vqdot(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP13]], [[TMP12]] ; FIXED-V-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX]]) ; FIXED-V-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-V: scalar.ph: +; FIXED-V: for.exit: +; FIXED-V-NEXT: ret i32 [[TMP15]] ; ; FIXED-ZVQDOTQ-LABEL: define i32 @vqdot( ; FIXED-ZVQDOTQ-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { @@ -143,7 +144,8 @@ define i32 @vqdot(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[BIN_RDX:%.*]] = add <2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] ; FIXED-ZVQDOTQ-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[BIN_RDX]]) ; FIXED-ZVQDOTQ-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-ZVQDOTQ: scalar.ph: +; FIXED-ZVQDOTQ: for.exit: +; FIXED-ZVQDOTQ-NEXT: ret i32 [[TMP13]] ; entry: br label %for.body @@ -263,12 +265,13 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[TMP13]] = add <8 x i32> [[TMP11]], [[VEC_PHI1]] ; FIXED-V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; FIXED-V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; FIXED-V: middle.block: ; FIXED-V-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP13]], [[TMP12]] ; FIXED-V-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX]]) ; FIXED-V-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-V: scalar.ph: +; FIXED-V: for.exit: +; FIXED-V-NEXT: ret i32 [[TMP15]] ; ; FIXED-ZVQDOTQ-LABEL: define i32 @vqdotu( ; FIXED-ZVQDOTQ-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -298,12 +301,13 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]]) ; FIXED-ZVQDOTQ-NEXT: 
[[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; FIXED-ZVQDOTQ-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; FIXED-ZVQDOTQ: middle.block: ; FIXED-ZVQDOTQ-NEXT: [[BIN_RDX:%.*]] = add <2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] ; FIXED-ZVQDOTQ-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[BIN_RDX]]) ; FIXED-ZVQDOTQ-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-ZVQDOTQ: scalar.ph: +; FIXED-ZVQDOTQ: for.exit: +; FIXED-ZVQDOTQ-NEXT: ret i32 [[TMP13]] ; entry: br label %for.body @@ -423,12 +427,13 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[TMP13]] = add <8 x i32> [[TMP11]], [[VEC_PHI1]] ; FIXED-V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; FIXED-V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; FIXED-V: middle.block: ; FIXED-V-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP13]], [[TMP12]] ; FIXED-V-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX]]) ; FIXED-V-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-V: scalar.ph: +; FIXED-V: for.exit: +; FIXED-V-NEXT: ret i32 [[TMP15]] ; ; FIXED-ZVQDOTQ-LABEL: define i32 @vqdotsu( ; FIXED-ZVQDOTQ-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -458,12 +463,13 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]]) ; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; FIXED-ZVQDOTQ-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 
1024 -; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; FIXED-ZVQDOTQ: middle.block: ; FIXED-ZVQDOTQ-NEXT: [[BIN_RDX:%.*]] = add <2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] ; FIXED-ZVQDOTQ-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[BIN_RDX]]) ; FIXED-ZVQDOTQ-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-ZVQDOTQ: scalar.ph: +; FIXED-ZVQDOTQ: for.exit: +; FIXED-ZVQDOTQ-NEXT: ret i32 [[TMP13]] ; entry: br label %for.body @@ -582,12 +588,13 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[TMP13]] = add <8 x i32> [[TMP11]], [[VEC_PHI1]] ; FIXED-V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; FIXED-V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; FIXED-V: middle.block: ; FIXED-V-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP13]], [[TMP12]] ; FIXED-V-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX]]) ; FIXED-V-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-V: scalar.ph: +; FIXED-V: for.exit: +; FIXED-V-NEXT: ret i32 [[TMP15]] ; ; FIXED-ZVQDOTQ-LABEL: define i32 @vqdotsu2( ; FIXED-ZVQDOTQ-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -617,12 +624,13 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]]) ; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; FIXED-ZVQDOTQ-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP8:![0-9]+]] +; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; FIXED-ZVQDOTQ: middle.block: ; FIXED-ZVQDOTQ-NEXT: [[BIN_RDX:%.*]] = add <2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] ; FIXED-ZVQDOTQ-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[BIN_RDX]]) ; FIXED-ZVQDOTQ-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-ZVQDOTQ: scalar.ph: +; FIXED-ZVQDOTQ: for.exit: +; FIXED-ZVQDOTQ-NEXT: ret i32 [[TMP13]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll index 782c2f6..65928f8 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll @@ -49,30 +49,7 @@ define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64 ; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[C_1:%.*]] = icmp ule i64 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[C_1]], label [[THEN_1:%.*]], label [[ELSE_1:%.*]] -; CHECK: then.1: -; CHECK-NEXT: [[C_2:%.*]] = icmp ule i64 [[IV]], [[B]] -; CHECK-NEXT: br i1 [[C_2]], label [[ELSE_1]], label [[MERGE:%.*]] -; CHECK: else.1: -; CHECK-NEXT: [[C_3:%.*]] = icmp ule i64 [[IV]], [[C]] -; CHECK-NEXT: br i1 [[C_3]], label [[THEN_2:%.*]], label [[LOOP_LATCH]] -; CHECK: then.2: -; CHECK-NEXT: br label [[MERGE]] -; CHECK: merge: -; 
CHECK-NEXT: [[IDX:%.*]] = phi i64 [ poison, [[THEN_1]] ], [ [[IV]], [[THEN_2]] ] -; CHECK-NEXT: [[GETELEMENTPTR:%.*]] = getelementptr i16, ptr [[ARG]], i64 [[IDX]] -; CHECK-NEXT: store i16 0, ptr [[GETELEMENTPTR]], align 2 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i64 [[IV]], 1000 -; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll index 3739f85..8d4d282 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll @@ -37,27 +37,7 @@ define void @test(ptr %p, i64 %a, i8 %b) { ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_COND]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT1:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_COND1:%.*]] -; CHECK: for.cond: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH1:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY:%.*]] ] -; CHECK-NEXT: [[ADD]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2 -; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48 -; CHECK-NEXT: [[ASHR:%.*]] = ashr i64 [[SHL]], 52 -; CHECK-NEXT: [[TRUNC_I32:%.*]] = trunc i64 [[ASHR]] to i32 -; CHECK-NEXT: br i1 [[CMP_SLT]], label [[COND_FALSE:%.*]], label [[FOR_BODY]] -; CHECK: cond.false: -; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[B]] to i32 -; CHECK-NEXT: br label [[FOR_BODY]] -; CHECK: for.body: -; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TRUNC_I32]], [[FOR_COND1]] ], [ [[ZEXT]], [[COND_FALSE]] ] -; CHECK-NEXT: [[SHL_I32:%.*]] = shl i32 [[COND]], 8 -; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[SHL_I32]] to i8 -; CHECK-NEXT: store i8 [[TRUNC]], ptr [[P]], 
align 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV]], 8 -; CHECK-NEXT: br i1 [[CMP]], label [[FOR_COND1]], label [[EXIT1]] +; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll index 9b6bc68..735fb769 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll @@ -29,20 +29,8 @@ define i32 @add(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP10]], [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP11]] ; entry: br label %for.body @@ -85,20 +73,8 @@ define i32 @sub(ptr %a, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP3]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 
0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 1024, %[[SCALAR_PH]] ], [ [[SUB:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-NEXT: [[SUB]] = sub i32 [[RDX]], [[X]] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[SUB_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUB_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP6]] ; entry: br label %loop @@ -144,23 +120,8 @@ define i32 @addsub(ptr %a, ptr %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP5]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[SUB:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[RDX]], [[X]] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[Y:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[SUB]] = sub i32 [[ADD]], [[Y]] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[SUB_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[LOOP]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUB_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP8]] ; entry: br label %loop @@ -209,20 +170,8 @@ define i32 @or(ptr nocapture %a, ptr 
nocapture readonly %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[OR:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[OR]] = or i32 [[TMP10]], [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[OR_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP11]] ; entry: br label %for.body @@ -267,20 +216,8 @@ define i32 @and(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[AND:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[AND]] = and i32 [[TMP10]], [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 
[[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[AND_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP11]] ; entry: br label %for.body @@ -325,20 +262,8 @@ define i32 @xor(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[XOR]] = xor i32 [[TMP10]], [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[XOR_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP11]] ; entry: br label %for.body @@ -384,21 +309,8 @@ define i32 @smin(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], 
%[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP11]], [[SUM_010]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP12]] ; entry: br label %for.body @@ -445,21 +357,8 @@ define i32 @umax(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP11]], [[SUM_010]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ 
[[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP12]] ; entry: br label %for.body @@ -505,20 +404,8 @@ define float @fadd_fast(ptr noalias nocapture readonly %a, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = fadd fast float [[TMP10]], [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[ADD_LCSSA]] +; CHECK-NEXT: ret float [[TMP11]] ; entry: br label %for.body @@ -561,20 +448,8 @@ define half @fadd_fast_half_zvfh(ptr noalias nocapture readonly %a, i64 %n) "tar ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, <vscale x 8 x half> [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = 
getelementptr inbounds half, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load half, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP10]], [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret half [[ADD_LCSSA]] +; CHECK-NEXT: ret half [[TMP11]] ; entry: br label %for.body @@ -744,21 +619,8 @@ define float @fmin_fast(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt float [[TMP11]], [[SUM_07]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret float [[TMP12]] ; entry: br label %for.body @@ -803,21 +665,8 
@@ define half @fmin_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) # ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call half @llvm.vector.reduce.fmin.nxv8f16(<vscale x 8 x half> [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt half [[TMP11]], [[SUM_07]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret half [[TMP12]] ; entry: br label %for.body @@ -862,21 +711,8 @@ define bfloat @fmin_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call bfloat @llvm.vector.reduce.fmin.nxv8bf16(<vscale x 8 x bfloat> [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ 0xR0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], 
i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt bfloat [[TMP11]], [[SUM_07]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret bfloat [[TMP12]] ; entry: br label %for.body @@ -923,21 +759,8 @@ define float @fmax_fast(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt float [[TMP11]], [[SUM_07]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; 
CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret float [[TMP12]] ; entry: br label %for.body @@ -982,21 +805,8 @@ define half @fmax_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) # ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call fast half @llvm.vector.reduce.fmax.nxv8f16(<vscale x 8 x half> [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt half [[TMP11]], [[SUM_07]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret half [[TMP12]] ; entry: br label %for.body @@ -1041,21 +851,8 @@ define bfloat @fmax_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16(<vscale x 8 x bfloat> [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ 
0xR0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt bfloat [[TMP11]], [[SUM_07]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret bfloat [[TMP12]] ; entry: br label %for.body @@ -1243,22 +1040,8 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP16:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[SUM_07]]) -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 
[[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[MULADD_LCSSA]] +; CHECK-NEXT: ret float [[TMP16]] ; entry: br label %for.body @@ -1305,22 +1088,8 @@ define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh" ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP16:%.*]] = call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, <vscale x 8 x half> [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[TMP12:%.*]] = load half, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP11]], half [[TMP12]], half [[SUM_07]]) -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret half [[MULADD_LCSSA]] +; CHECK-NEXT: ret half [[TMP16]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll index 93c0a74..850a6cb 100644 --- 
a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll @@ -58,36 +58,6 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; CHECK-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[WIDE_IV_0:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_0_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[WIDE_IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_1_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[WIDE_IV_2:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_2_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[WIDE_IV_0_SUB:%.*]] = sub i64 [[WIDE_IV_0]], 1 -; CHECK-NEXT: [[A_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_0_SUB]] -; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[A_GEP0]], align 1 -; CHECK-NEXT: [[WIDE_IV_1_SUB:%.*]] = sub i64 [[WIDE_IV_1]], 1 -; CHECK-NEXT: [[B_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_1_SUB]] -; CHECK-NEXT: [[B:%.*]] = load i8, ptr [[B_GEP0]], align 1 -; CHECK-NEXT: [[WIDE_IV_2_SUB:%.*]] = sub i64 [[WIDE_IV_2]], 1 -; CHECK-NEXT: [[C_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_2_SUB]] -; CHECK-NEXT: [[C:%.*]] = load i8, ptr [[C_GEP0]], align 1 -; CHECK-NEXT: [[IV_MUL:%.*]] = mul i64 [[IV]], 3 -; CHECK-NEXT: [[BASE:%.*]] = getelementptr i8, ptr [[P1]], i64 [[IV_MUL]] -; CHECK-NEXT: [[A_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 0 -; CHECK-NEXT: store i8 [[A]], ptr [[A_GEP1]], align 1 -; CHECK-NEXT: [[B_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 1 -; CHECK-NEXT: store i8 [[B]], ptr [[B_GEP1]], align 1 -; CHECK-NEXT: [[C_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 2 -; CHECK-NEXT: store i8 [[C]], ptr [[C_GEP1]], 
align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[WIDE_IV_0_NEXT]] = add i64 [[WIDE_IV_0]], 2 -; CHECK-NEXT: [[WIDE_IV_1_NEXT]] = add i64 [[WIDE_IV_1]], 3 -; CHECK-NEXT: [[WIDE_IV_2_NEXT]] = add i64 [[WIDE_IV_2]], 4 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -145,36 +115,6 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; NO-REG-PRESSURE-CHECK-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NO-REG-PRESSURE-CHECK: [[MIDDLE_BLOCK]]: ; NO-REG-PRESSURE-CHECK-NEXT: br label %[[EXIT:.*]] -; NO-REG-PRESSURE-CHECK: [[SCALAR_PH:.*]]: -; NO-REG-PRESSURE-CHECK-NEXT: br label %[[LOOP:.*]] -; NO-REG-PRESSURE-CHECK: [[LOOP]]: -; NO-REG-PRESSURE-CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_0:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_0_NEXT:%.*]], %[[LOOP]] ] -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_1_NEXT:%.*]], %[[LOOP]] ] -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_2:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_2_NEXT:%.*]], %[[LOOP]] ] -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_0_SUB:%.*]] = sub i64 [[WIDE_IV_0]], 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[A_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_0_SUB]] -; NO-REG-PRESSURE-CHECK-NEXT: [[A:%.*]] = load i8, ptr [[A_GEP0]], align 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_1_SUB:%.*]] = sub i64 [[WIDE_IV_1]], 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[B_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_1_SUB]] -; NO-REG-PRESSURE-CHECK-NEXT: [[B:%.*]] = load i8, ptr [[B_GEP0]], align 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_2_SUB:%.*]] = sub i64 [[WIDE_IV_2]], 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[C_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 
[[WIDE_IV_2_SUB]] -; NO-REG-PRESSURE-CHECK-NEXT: [[C:%.*]] = load i8, ptr [[C_GEP0]], align 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[IV_MUL:%.*]] = mul i64 [[IV]], 3 -; NO-REG-PRESSURE-CHECK-NEXT: [[BASE:%.*]] = getelementptr i8, ptr [[P1]], i64 [[IV_MUL]] -; NO-REG-PRESSURE-CHECK-NEXT: [[A_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 0 -; NO-REG-PRESSURE-CHECK-NEXT: store i8 [[A]], ptr [[A_GEP1]], align 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[B_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 1 -; NO-REG-PRESSURE-CHECK-NEXT: store i8 [[B]], ptr [[B_GEP1]], align 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[C_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 2 -; NO-REG-PRESSURE-CHECK-NEXT: store i8 [[C]], ptr [[C_GEP1]], align 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_0_NEXT]] = add i64 [[WIDE_IV_0]], 2 -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_1_NEXT]] = add i64 [[WIDE_IV_1]], 3 -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_2_NEXT]] = add i64 [[WIDE_IV_2]], 4 -; NO-REG-PRESSURE-CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV]], 1024 -; NO-REG-PRESSURE-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; NO-REG-PRESSURE-CHECK: [[EXIT]]: ; NO-REG-PRESSURE-CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll index 7b8404a..b80368d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll @@ -21,18 +21,8 @@ define float @s311(float %a_0, float %s311_sum) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi 
float [ [[S311_SUM]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED_NEXT]] = fadd float [[A_0]], [[RED]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1200 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RED_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RED_LCSSA]] +; CHECK-NEXT: ret float [[TMP6]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll index a165dde..5ca9bfd 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll @@ -53,10 +53,9 @@ define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) { ; RV64-NEXT: [[TMP23:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RV64-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; RV64: [[MIDDLE_BLOCK]]: -; RV64-NEXT: br [[EXIT:label %.*]] -; RV64: [[SCALAR_PH:.*:]] -; RV64-NEXT: br label %[[FOR_BODY:.*]] -; RV64: [[FOR_BODY]]: +; RV64-NEXT: br label %[[EXIT:.*]] +; RV64: [[EXIT]]: +; RV64-NEXT: ret void ; ; RV32-LABEL: define void @vector_reverse_i32( ; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] { @@ -93,10 +92,9 @@ define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) { ; RV32-NEXT: [[TMP21:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RV32-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; RV32: [[MIDDLE_BLOCK]]: -; RV32-NEXT: br [[EXIT:label %.*]] -; RV32: [[SCALAR_PH:.*:]] -; RV32-NEXT: br label %[[FOR_BODY:.*]] -; RV32: [[FOR_BODY]]: +; RV32-NEXT: br label %[[EXIT:.*]] +; RV32: [[EXIT]]: +; RV32-NEXT: ret void ; ; RV64-UF2-LABEL: define void 
@vector_reverse_i32( ; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] { @@ -718,10 +716,9 @@ define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) { ; RV64-NEXT: [[TMP23:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RV64-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; RV64: [[MIDDLE_BLOCK]]: -; RV64-NEXT: br [[EXIT:label %.*]] -; RV64: [[SCALAR_PH:.*:]] -; RV64-NEXT: br label %[[FOR_BODY:.*]] -; RV64: [[FOR_BODY]]: +; RV64-NEXT: br label %[[EXIT:.*]] +; RV64: [[EXIT]]: +; RV64-NEXT: ret void ; ; RV32-LABEL: define void @vector_reverse_f32_simplify( ; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] { @@ -758,10 +755,9 @@ define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) { ; RV32-NEXT: [[TMP21:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RV32-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; RV32: [[MIDDLE_BLOCK]]: -; RV32-NEXT: br [[EXIT:label %.*]] -; RV32: [[SCALAR_PH:.*:]] -; RV32-NEXT: br label %[[FOR_BODY:.*]] -; RV32: [[FOR_BODY]]: +; RV32-NEXT: br label %[[EXIT:.*]] +; RV32: [[EXIT]]: +; RV32-NEXT: ret void ; ; RV64-UF2-LABEL: define void @vector_reverse_f32_simplify( ; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] { diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll index ecde164..e046816 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll @@ -28,19 +28,7 @@ define void @test(ptr %p) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] 
-; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 200 -; CHECK-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -81,19 +69,7 @@ define void @test_may_clobber(ptr %p) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 100 -; CHECK-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -137,19 +113,7 @@ define void @trivial_due_max_vscale(ptr %p) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, 
[[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 8192 -; CHECK-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -193,19 +157,7 @@ define void @no_high_lmul_or_interleave(ptr %p) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 1024 -; CHECK-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll index 544ddc5..7330ce6 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll @@ -27,18 +27,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 
[[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -84,18 +73,7 @@ define void @vector_add_i32(ptr noalias nocapture %a, i32 %v, i64 %n) { ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[ELEM]], [[V]] -; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -179,18 +157,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8 -; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]] -; CHECK-NEXT: store i64 [[V]], ptr [[AADDR]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -235,23 +202,9 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP9]]) -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8 -; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[AADDR]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]] -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; 
CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: -; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i64 [[TMP11]] ; entry: br label %for.body @@ -292,16 +245,7 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -340,16 +284,7 @@ define void @splat_ptr(ptr noalias nocapture %a, ptr %v, i64 %n) { ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store ptr [[V]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label 
[[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll index a596c63..3c90908 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll @@ -28,18 +28,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -84,18 +73,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[AIDX:%.*]] = 
load i64, ptr [[BADDR]], align 8 -; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]] -; CHECK-NEXT: store i64 [[V]], ptr [[AADDR]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -140,23 +118,9 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP11]]) -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8 -; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[AADDR]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]] -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: -; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i64 [[TMP14]] ; entry: br label %for.body @@ -197,16 +161,7 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: 
br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -246,17 +201,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: store i64 [[V]], ptr [[B]], align 8 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -356,18 +301,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; 
CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll index 4bfe9a4..8971b0c 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll @@ -29,21 +29,8 @@ define i32 @select_icmp(i32 %x, i32 %y, ptr nocapture readonly %c, i64 %n) { ; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0 ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[A:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP12]], [[X]] -; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]] -; CHECK-NEXT: br 
i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[COND_LCSSA]] +; CHECK-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body @@ -91,21 +78,8 @@ define i32 @select_fcmp(float %x, i32 %y, ptr nocapture readonly %c, i64 %n) { ; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0 ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[A:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast olt float [[TMP12]], [[X]] -; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[COND_LCSSA]] +; CHECK-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body @@ -151,21 +125,8 @@ define i32 @select_const_i32_from_icmp(ptr nocapture readonly %v, i64 %n) { ; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 7, i32 3 ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 
0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ 3, %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]] -; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4 -; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3 -; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 7 -; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]] -; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[DOTLCSSA]] +; CHECK-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body @@ -211,21 +172,8 @@ define i32 @select_i32_from_icmp(ptr nocapture readonly %v, i32 %a, i32 %b, i64 ; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[B]], i32 [[A]] ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[A]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]] -; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4 -; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3 -; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 [[B]] -; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]] -; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret 
i32 [[DOTLCSSA]] +; CHECK-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body @@ -271,21 +219,8 @@ define i32 @select_const_i32_from_fcmp(ptr nocapture readonly %v, i64 %n) { ; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 1, i32 2 ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[V]], i64 [[TMP12]] -; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4 -; CHECK-NEXT: [[TMP16:%.*]] = fcmp fast ueq float [[TMP15]], 3.000000e+00 -; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 1 -; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]] -; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[DOTLCSSA]] +; CHECK-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body @@ -373,29 +308,8 @@ define i32 @pred_select_const_i32_from_icmp(ptr noalias nocapture readonly %src1 ; CHECK-NEXT: [[TMP13:%.*]] = freeze i1 [[TMP12]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP13]], i32 1, i32 0 ; CHECK-NEXT: br label %[[FOR_END_LOOPEXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[I_013:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_INC:.*]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[R_012:%.*]] = phi i32 [ [[R_1:%.*]], %[[FOR_INC]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[I_013]] -; CHECK-NEXT: [[TMP14:%.*]] = 
load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP14]], 35 -; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[FOR_INC]] -; CHECK: [[IF_THEN]]: -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[SRC2]], i64 [[I_013]] -; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i32 [[TMP15]], 2 -; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[CMP3]], i32 1, i32 [[R_012]] -; CHECK-NEXT: br label %[[FOR_INC]] -; CHECK: [[FOR_INC]]: -; CHECK-NEXT: [[R_1]] = phi i32 [ [[R_012]], %[[FOR_BODY]] ], [ [[SPEC_SELECT]], %[[IF_THEN]] ] -; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_013]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]] ; CHECK: [[FOR_END_LOOPEXIT]]: -; CHECK-NEXT: [[R_1_LCSSA:%.*]] = phi i32 [ [[R_1]], %[[FOR_INC]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[R_1_LCSSA]] +; CHECK-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll index ca1c710..2fbc73e 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll @@ -31,19 +31,7 @@ define void @single_constant_stride_int_scaled(ptr %p) { ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[SCALAR_PH:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH1:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], 8 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]] -; 
CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[SCALAR_PH]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -147,20 +135,7 @@ define void @single_constant_stride_int_iv(ptr %p) { ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]] -; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; CHECK-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], 64 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -264,19 +239,7 @@ define void @single_constant_stride_ptr_iv(ptr %p) { ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[SCALAR_PH:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH1:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[P]], [[SCALAR_PH1]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ] -; 
CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4 -; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; CHECK-NEXT: store i32 [[Y0]], ptr [[PTR]], align 4 -; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[SCALAR_PH]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -1357,18 +1320,7 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) { ; NOSTRIDED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; NOSTRIDED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; NOSTRIDED: middle.block: -; NOSTRIDED-NEXT: br label [[EXIT:%.*]] -; NOSTRIDED: scalar.ph: ; NOSTRIDED-NEXT: br label [[LOOP:%.*]] -; NOSTRIDED: loop: -; NOSTRIDED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; NOSTRIDED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[IN]], i64 [[IV]] -; NOSTRIDED-NEXT: [[TMP8:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; NOSTRIDED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT]], i64 [[IV]] -; NOSTRIDED-NEXT: store i64 [[TMP8]], ptr [[ARRAYIDX2]], align 8 -; NOSTRIDED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; NOSTRIDED: exit: ; NOSTRIDED-NEXT: ret void ; @@ -1452,18 +1404,7 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) { ; STRIDED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; STRIDED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; STRIDED: middle.block: -; STRIDED-NEXT: br label [[EXIT:%.*]] -; STRIDED: scalar.ph: ; STRIDED-NEXT: br label [[LOOP:%.*]] -; STRIDED: loop: -; STRIDED-NEXT: [[IV:%.*]] = phi i64 [ 0, 
[[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; STRIDED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[IN]], i64 [[IV]] -; STRIDED-NEXT: [[TMP8:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; STRIDED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT]], i64 [[IV]] -; STRIDED-NEXT: store i64 [[TMP8]], ptr [[ARRAYIDX2]], align 8 -; STRIDED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; STRIDED: exit: ; STRIDED-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll index 6652fef..8ab0f6f 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll @@ -1206,17 +1206,6 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) { ; IF-EVL-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[EXIT:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[LOOP:.*]] -; IF-EVL: [[LOOP]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[GEP]] to i64 -; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: store i64 [[TMP0]], ptr [[GEP2]], align 8 -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]] ; IF-EVL: [[EXIT]]: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll 
b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll index 61f97aa..34a8275 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll @@ -43,23 +43,9 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL-OUTLOOP: middle.block: ; IF-EVL-OUTLOOP-NEXT: [[TMP24:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP20]]) -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-OUTLOOP: scalar.ph: ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP27]], 3 -; IF-EVL-OUTLOOP-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[TMP27]], i32 0 -; IF-EVL-OUTLOOP-NEXT: [[ADD]] = add nsw i32 [[SELECT]], [[RDX]] -; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL-OUTLOOP: for.end: -; IF-EVL-OUTLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP24]], [[MIDDLE_BLOCK]] ] -; IF-EVL-OUTLOOP-NEXT: ret i32 [[ADD_LCSSA]] +; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP24]] ; ; IF-EVL-INLOOP-LABEL: define i32 @cond_add( ; IF-EVL-INLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0:[0-9]+]] { @@ -84,23 +70,9 @@ define i32 
@cond_add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL-INLOOP: middle.block: -; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-INLOOP: scalar.ph: ; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-INLOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP25]], 3 -; IF-EVL-INLOOP-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[TMP25]], i32 0 -; IF-EVL-INLOOP-NEXT: [[ADD]] = add nsw i32 [[SELECT]], [[RDX]] -; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL-INLOOP: for.end: -; IF-EVL-INLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ] -; IF-EVL-INLOOP-NEXT: ret i32 [[ADD_LCSSA]] +; IF-EVL-INLOOP-NEXT: ret i32 [[TMP22]] ; ; NO-VP-OUTLOOP-LABEL: define i32 @cond_add( ; NO-VP-OUTLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0:[0-9]+]] { @@ -239,30 +211,12 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]] ; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP23]] ; IF-EVL-OUTLOOP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL-OUTLOOP: middle.block: ; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PREDPHI]]) -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-OUTLOOP: scalar.ph: -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ] -; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-OUTLOOP-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP28]], 3 -; IF-EVL-OUTLOOP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; IF-EVL-OUTLOOP: if.then: -; IF-EVL-OUTLOOP-NEXT: [[ADD_PRED:%.*]] = add nsw i32 [[RDX]], [[TMP28]] -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_INC]] -; IF-EVL-OUTLOOP: for.inc: -; IF-EVL-OUTLOOP-NEXT: [[RDX_ADD]] = phi i32 [ [[ADD_PRED]], [[IF_THEN]] ], [ [[RDX]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]] +; IF-EVL-OUTLOOP-NEXT: br label [[FOR_INC:%.*]] ; IF-EVL-OUTLOOP: for.end: -; IF-EVL-OUTLOOP-NEXT: [[RDX_ADD_LCSSA:%.*]] = phi i32 [ [[RDX_ADD]], [[FOR_INC]] ], [ [[TMP27]], [[MIDDLE_BLOCK]] ] -; IF-EVL-OUTLOOP-NEXT: ret i32 [[RDX_ADD_LCSSA]] +; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP27]] ; ; IF-EVL-INLOOP-LABEL: define i32 @cond_add_pred( ; IF-EVL-INLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] { @@ -284,29 +238,11 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 
%start) { ; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]] ; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP23]] ; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL-INLOOP: middle.block: -; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-INLOOP: scalar.ph: -; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ] -; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-INLOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP25]], 3 -; IF-EVL-INLOOP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; IF-EVL-INLOOP: if.then: -; IF-EVL-INLOOP-NEXT: [[ADD_PRED:%.*]] = add nsw i32 [[RDX]], [[TMP25]] -; IF-EVL-INLOOP-NEXT: br label [[FOR_INC]] -; IF-EVL-INLOOP: for.inc: -; IF-EVL-INLOOP-NEXT: [[RDX_ADD]] = phi i32 [ [[ADD_PRED]], [[IF_THEN]] ], [ [[RDX]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]] +; IF-EVL-INLOOP-NEXT: br label [[FOR_INC:%.*]] ; IF-EVL-INLOOP: for.end: -; IF-EVL-INLOOP-NEXT: [[RDX_ADD_LCSSA:%.*]] = phi i32 [ [[RDX_ADD]], [[FOR_INC]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ] -; IF-EVL-INLOOP-NEXT: ret i32 [[RDX_ADD_LCSSA]] +; IF-EVL-INLOOP-NEXT: ret i32 [[TMP22]] ; ; NO-VP-OUTLOOP-LABEL: 
define i32 @cond_add_pred( ; NO-VP-OUTLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] { @@ -466,27 +402,12 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]] ; IF-EVL-OUTLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-OUTLOOP-NEXT: [[TMP21:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL-OUTLOOP: middle.block: ; IF-EVL-OUTLOOP-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]]) -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-OUTLOOP: scalar.ph: ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-OUTLOOP-NEXT: [[TMP37:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-OUTLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32 -; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP37]], [[IV_TRUNC]] -; IF-EVL-OUTLOOP-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[TMP37]], i32 0 -; IF-EVL-OUTLOOP-NEXT: [[ADD]] = add nsw i32 [[SELECT]], [[RDX]] -; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]] ; IF-EVL-OUTLOOP: for.end: -; IF-EVL-OUTLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ 
[[TMP22]], [[MIDDLE_BLOCK]] ] -; IF-EVL-OUTLOOP-NEXT: ret i32 [[ADD_LCSSA]] +; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP22]] ; ; IF-EVL-INLOOP-LABEL: define i32 @step_cond_add( ; IF-EVL-INLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] { @@ -516,26 +437,11 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]] ; IF-EVL-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-INLOOP-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-INLOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-INLOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL-INLOOP: middle.block: -; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-INLOOP: scalar.ph: ; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[RDX1:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-INLOOP-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-INLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32 -; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP28]], [[IV_TRUNC]] -; IF-EVL-INLOOP-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[TMP28]], i32 0 -; IF-EVL-INLOOP-NEXT: [[ADD1]] = add nsw i32 [[SELECT]], [[RDX1]] -; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]] ; IF-EVL-INLOOP: for.end: -; IF-EVL-INLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD1]], 
[[FOR_BODY]] ], [ [[ADD]], [[MIDDLE_BLOCK]] ] -; IF-EVL-INLOOP-NEXT: ret i32 [[ADD_LCSSA]] +; IF-EVL-INLOOP-NEXT: ret i32 [[ADD]] ; ; NO-VP-OUTLOOP-LABEL: define i32 @step_cond_add( ; NO-VP-OUTLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] { @@ -700,31 +606,12 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]] ; IF-EVL-OUTLOOP-NEXT: [[VEC_IND_NEXT7]] = add <vscale x 4 x i32> [[VEC_IND2]], [[BROADCAST_SPLAT2]] ; IF-EVL-OUTLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL-OUTLOOP: middle.block: ; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP24]]) -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-OUTLOOP: scalar.ph: -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ] -; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ] -; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] -; IF-EVL-OUTLOOP-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4 -; IF-EVL-OUTLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV1]] to i32 -; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP38]], [[IV_TRUNC]] -; IF-EVL-OUTLOOP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[MIDDLE_BLOCK]] -; IF-EVL-OUTLOOP: if.then: -; IF-EVL-OUTLOOP-NEXT: [[ADD_PRED:%.*]] = add nsw i32 [[BC_MERGE_RDX]], [[TMP38]] -; IF-EVL-OUTLOOP-NEXT: br label [[MIDDLE_BLOCK]] -; IF-EVL-OUTLOOP: for.inc: -; IF-EVL-OUTLOOP-NEXT: [[RDX_ADD]] = phi 
i32 [ [[ADD_PRED]], [[IF_THEN]] ], [ [[BC_MERGE_RDX]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 -; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]] +; IF-EVL-OUTLOOP-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; IF-EVL-OUTLOOP: for.end: -; IF-EVL-OUTLOOP-NEXT: [[RDX_ADD_LCSSA:%.*]] = phi i32 [ [[RDX_ADD]], [[MIDDLE_BLOCK]] ], [ [[TMP27]], [[MIDDLE_BLOCK1]] ] -; IF-EVL-OUTLOOP-NEXT: ret i32 [[RDX_ADD_LCSSA]] +; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP27]] ; ; IF-EVL-INLOOP-LABEL: define i32 @step_cond_add_pred( ; IF-EVL-INLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] { @@ -753,30 +640,11 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]] ; IF-EVL-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-INLOOP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-INLOOP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-INLOOP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL-INLOOP: middle.block: -; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-INLOOP: scalar.ph: -; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ] -; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ] -; IF-EVL-INLOOP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] -; IF-EVL-INLOOP-NEXT: [[TMP35:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4 -; IF-EVL-INLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV1]] to i32 -; IF-EVL-INLOOP-NEXT: 
[[CMP:%.*]] = icmp sgt i32 [[TMP35]], [[IV_TRUNC]] -; IF-EVL-INLOOP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[MIDDLE_BLOCK]] -; IF-EVL-INLOOP: if.then: -; IF-EVL-INLOOP-NEXT: [[ADD_PRED:%.*]] = add nsw i32 [[BC_MERGE_RDX]], [[TMP35]] -; IF-EVL-INLOOP-NEXT: br label [[MIDDLE_BLOCK]] -; IF-EVL-INLOOP: for.inc: -; IF-EVL-INLOOP-NEXT: [[RDX_ADD]] = phi i32 [ [[ADD_PRED]], [[IF_THEN]] ], [ [[BC_MERGE_RDX]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 -; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]] +; IF-EVL-INLOOP-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; IF-EVL-INLOOP: for.end: -; IF-EVL-INLOOP-NEXT: [[RDX_ADD_LCSSA:%.*]] = phi i32 [ [[RDX_ADD]], [[MIDDLE_BLOCK]] ], [ [[TMP17]], [[MIDDLE_BLOCK1]] ] -; IF-EVL-INLOOP-NEXT: ret i32 [[RDX_ADD_LCSSA]] +; IF-EVL-INLOOP-NEXT: ret i32 [[TMP17]] ; ; NO-VP-OUTLOOP-LABEL: define i32 @step_cond_add_pred( ; NO-VP-OUTLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] { @@ -931,20 +799,16 @@ for.end: ; IF-EVL-OUTLOOP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; IF-EVL-OUTLOOP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; IF-EVL-OUTLOOP: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; IF-EVL-OUTLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} -; IF-EVL-OUTLOOP: [[META4]] = !{!"llvm.loop.vectorize.enable", i1 true} +; IF-EVL-OUTLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} +; IF-EVL-OUTLOOP: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} ; IF-EVL-OUTLOOP: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} -; IF-EVL-OUTLOOP: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} -; IF-EVL-OUTLOOP: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} ;. 
; IF-EVL-INLOOP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; IF-EVL-INLOOP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; IF-EVL-INLOOP: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; IF-EVL-INLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} -; IF-EVL-INLOOP: [[META4]] = !{!"llvm.loop.vectorize.enable", i1 true} +; IF-EVL-INLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} +; IF-EVL-INLOOP: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} ; IF-EVL-INLOOP: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} -; IF-EVL-INLOOP: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} -; IF-EVL-INLOOP: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} ;. ; NO-VP-OUTLOOP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; NO-VP-OUTLOOP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll index 22d216e..8cd540c 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll @@ -33,20 +33,6 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[EXIT:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[LOOP:.*]] -; IF-EVL: [[LOOP]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8 -; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i64, ptr [[B_GEP]], align 8 -; IF-EVL-NEXT: [[TMP18:%.*]] = sdiv i64 [[TMP16]], [[TMP17]] -; IF-EVL-NEXT: 
[[C_GEP:%.*]] = getelementptr i64, ptr [[C]], i64 [[IV]] -; IF-EVL-NEXT: store i64 [[TMP18]], ptr [[C_GEP]], align 8 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; IF-EVL-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; IF-EVL: [[EXIT]]: ; IF-EVL-NEXT: ret void ; @@ -143,20 +129,6 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[EXIT:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[LOOP:.*]] -; IF-EVL: [[LOOP]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8 -; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i64, ptr [[B_GEP]], align 8 -; IF-EVL-NEXT: [[TMP18:%.*]] = udiv i64 [[TMP16]], [[TMP17]] -; IF-EVL-NEXT: [[C_GEP:%.*]] = getelementptr i64, ptr [[C]], i64 [[IV]] -; IF-EVL-NEXT: store i64 [[TMP18]], ptr [[C_GEP]], align 8 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; IF-EVL-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; IF-EVL: [[EXIT]]: ; IF-EVL-NEXT: ret void ; @@ -252,20 +224,6 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[EXIT:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[LOOP:.*]] -; IF-EVL: [[LOOP]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 
[[IV]] -; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8 -; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i64, ptr [[B_GEP]], align 8 -; IF-EVL-NEXT: [[TMP18:%.*]] = srem i64 [[TMP16]], [[TMP17]] -; IF-EVL-NEXT: [[C_GEP:%.*]] = getelementptr i64, ptr [[C]], i64 [[IV]] -; IF-EVL-NEXT: store i64 [[TMP18]], ptr [[C_GEP]], align 8 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; IF-EVL-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; IF-EVL: [[EXIT]]: ; IF-EVL-NEXT: ret void ; @@ -361,20 +319,6 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[EXIT:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[LOOP:.*]] -; IF-EVL: [[LOOP]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8 -; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i64, ptr [[B_GEP]], align 8 -; IF-EVL-NEXT: [[TMP18:%.*]] = urem i64 [[TMP16]], [[TMP17]] -; IF-EVL-NEXT: [[C_GEP:%.*]] = getelementptr i64, ptr [[C]], i64 [[IV]] -; IF-EVL-NEXT: store i64 [[TMP18]], ptr [[C_GEP]], align 8 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; IF-EVL-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; IF-EVL: [[EXIT]]: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll index b153328..c7ba826 100644 --- 
a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll @@ -42,19 +42,6 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[FOR_END:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] -; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP24:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]] -; IF-EVL-NEXT: [[TMP24]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[TMP24]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[INDVARS]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL: [[FOR_END]]: ; IF-EVL-NEXT: ret void ; @@ -167,23 +154,9 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP23]] ; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label 
%[[FOR_END:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] -; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP31:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ 22, %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]] -; IF-EVL-NEXT: [[TMP31]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[FOR2]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[INDVARS]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3]] ; IF-EVL: [[FOR_END]]: ; IF-EVL-NEXT: ret void ; @@ -316,25 +289,9 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP27]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP27]] ; IF-EVL-NEXT: [[TMP26:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[FOR_END:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] -; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP38:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ 
22, %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR3:%.*]] = phi i32 [ 11, %[[SCALAR_PH]] ], [ [[FOR2]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]] -; IF-EVL-NEXT: [[TMP38]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR2]], [[FOR3]] -; IF-EVL-NEXT: [[ADD1:%.*]] = add i32 [[ADD]], [[FOR1]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]] -; IF-EVL-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[INDVARS]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3]] ; IF-EVL: [[FOR_END]]: ; IF-EVL-NEXT: ret void ; @@ -469,7 +426,7 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: store <vscale x 4 x i32> [[TMP11]], ptr [[TMP12]], align 4 ; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS]], [[TMP3]] ; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32() ; IF-EVL-NEXT: [[TMP15:%.*]] = mul nuw i32 [[TMP14]], 4 @@ -495,7 +452,7 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 ; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[IV]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label 
%[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: [[FOR_END]]: ; IF-EVL-NEXT: [[FOR1_LCSSA:%.*]] = phi i32 [ [[FOR1]], %[[FOR_BODY]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[FOR1_LCSSA]] @@ -613,20 +570,9 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) { ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[FOR_END:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] -; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV1_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i64 [ 33, %[[SCALAR_PH]] ], [ [[TMP14:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[TMP14]] = add i64 [[IV1]], 42 -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i64, ptr [[A]], i64 [[IV1]] -; IF-EVL-NEXT: store i64 [[FOR1]], ptr [[ARRAYIDX]], align 8 -; IF-EVL-NEXT: [[IV1_NEXT]] = add nuw nsw i64 [[IV1]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV1_NEXT]], [[TC]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3]] ; IF-EVL: [[FOR_END]]: ; IF-EVL-NEXT: ret void ; @@ -713,13 +659,11 @@ for.end: ; IF-EVL: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; IF-EVL: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; IF-EVL: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; IF-EVL: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} -; IF-EVL: [[META4]] = !{!"llvm.loop.vectorize.enable", i1 true} +; IF-EVL: [[LOOP3]] = distinct 
!{[[LOOP3]], [[META1]], [[META2]]} +; IF-EVL: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} ; IF-EVL: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} -; IF-EVL: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} +; IF-EVL: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]} ; IF-EVL: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} -; IF-EVL: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]} -; IF-EVL: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]} ;. ; NO-VP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; NO-VP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll index df550ec..b9a4e97 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll @@ -30,21 +30,9 @@ define i32 @add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD]] = add nsw i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], 
!llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ADD_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP15]] ; ; NO-VP-LABEL: @add( ; NO-VP-NEXT: entry: @@ -129,7 +117,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[TMP5]] = mul i32 [[VEC_PHI1]], [[TMP4]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP5]], [[MUL]] ; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] @@ -146,7 +134,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[MUL1]] = mul nsw i32 [[TMP0]], [[RDX1]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL1]], [[FOR_BODY1]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[MUL_LCSSA]] @@ -231,23 +219,11 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; 
IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[OR]] = or i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[OR_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP15]] ; ; NO-VP-LABEL: @or( ; NO-VP-NEXT: entry: @@ -327,23 +303,11 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; 
IF-EVL-NEXT: [[AND]] = and i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[AND_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP15]] ; ; NO-VP-LABEL: @and( ; NO-VP-NEXT: entry: @@ -423,23 +387,11 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[XOR]] = xor i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[XOR_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP15]] ; ; 
NO-VP-LABEL: @xor( ; NO-VP-NEXT: entry: @@ -519,24 +471,11 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP17]], [[RDX]] -; IF-EVL-NEXT: [[SMIN]] = select i1 [[CMP_I]], i32 [[TMP17]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[SMIN_LCSSA:%.*]] = phi i32 [ [[SMIN]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[SMIN_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_MINMAX]] ; ; NO-VP-LABEL: @smin( ; NO-VP-NEXT: entry: @@ -618,24 +557,11 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP15:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp sgt i32 [[TMP17]], [[RDX]] -; IF-EVL-NEXT: [[SMAX]] = select i1 [[CMP_I]], i32 [[TMP17]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[SMAX_LCSSA:%.*]] = phi i32 [ [[SMAX]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[SMAX_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_MINMAX]] ; ; NO-VP-LABEL: @smax( ; NO-VP-NEXT: entry: @@ -717,24 +643,11 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; 
IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[TMP17]], [[RDX]] -; IF-EVL-NEXT: [[UMIN]] = select i1 [[CMP_I]], i32 [[TMP17]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[UMIN_LCSSA:%.*]] = phi i32 [ [[UMIN]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[UMIN_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_MINMAX]] ; ; NO-VP-LABEL: @umin( ; NO-VP-NEXT: entry: @@ -816,24 +729,11 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP17]], [[RDX]] -; IF-EVL-NEXT: [[UMAX]] = select i1 [[CMP_I]], i32 [[TMP17]], i32 [[RDX]] -; 
IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[UMAX_LCSSA:%.*]] = phi i32 [ [[UMAX]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[UMAX_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_MINMAX]] ; ; NO-VP-LABEL: @umax( ; NO-VP-NEXT: entry: @@ -915,23 +815,11 @@ define float @fadd(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD]] = fadd reassoc float [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[ADD_LCSSA]] +; IF-EVL-NEXT: ret float [[TMP15]] ; ; NO-VP-LABEL: 
@fadd( ; NO-VP-NEXT: entry: @@ -1016,7 +904,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP5]] = fmul reassoc float [[VEC_PHI1]], [[TMP4]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[BIN_RDX:%.*]] = fmul reassoc float [[TMP5]], [[MUL]] ; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] @@ -1033,7 +921,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MUL1]] = fmul reassoc float [[TMP0]], [[RDX1]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP24:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP15:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL1]], [[FOR_BODY1]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MUL_LCSSA]] @@ -1119,24 +1007,11 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 
[ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP17]], [[RDX]] -; IF-EVL-NEXT: [[MIN]] = select i1 [[CMP]], float [[TMP17]], float [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY]] ], [ [[RDX_MINMAX_SELECT]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[MIN_LCSSA]] +; IF-EVL-NEXT: ret float [[RDX_MINMAX_SELECT]] ; ; NO-VP-LABEL: @fmin( ; NO-VP-NEXT: entry: @@ -1220,24 +1095,11 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP:%.*]] = fcmp 
fast ogt float [[TMP17]], [[RDX]] -; IF-EVL-NEXT: [[MAX]] = select i1 [[CMP]], float [[TMP17]], float [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY]] ], [ [[RDX_MINMAX_SELECT]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[MAX_LCSSA]] +; IF-EVL-NEXT: ret float [[RDX_MINMAX_SELECT]] ; ; NO-VP-LABEL: @fmax( ; NO-VP-NEXT: entry: @@ -1324,7 +1186,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]]) @@ -1342,7 +1204,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MIN]] = tail call float @llvm.minimum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP30:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP19:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY1]] ], [ 
[[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MIN_LCSSA]] @@ -1432,7 +1294,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]]) @@ -1450,7 +1312,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MAX]] = tail call float @llvm.maximum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP32:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP21:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MAX_LCSSA]] @@ -1539,25 +1401,11 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP22:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP21]], float [[TMP22]], float [[RDX]]) -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[MULADD_LCSSA]] +; IF-EVL-NEXT: ret float [[TMP18]] ; ; NO-VP-LABEL: @fmuladd( ; NO-VP-NEXT: entry: @@ -1644,27 +1492,14 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP16]]) ; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]] ; IF-EVL-NEXT: 
[[RDX_SELECT:%.*]] = select i1 [[TMP20]], i32 [[INV:%.*]], i32 [[START:%.*]] -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP21]], 3 -; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_SELECT]] ; ; NO-VP-LABEL: @anyof_icmp( ; NO-VP-NEXT: entry: @@ -1749,27 +1584,14 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP16]]) ; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]] ; IF-EVL-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP20]], i32 [[INV:%.*]], i32 [[START:%.*]] -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; 
IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = fcmp fast olt float [[TMP21]], 3.000000e+00 -; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_SELECT]] ; ; NO-VP-LABEL: @anyof_fcmp( ; NO-VP-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll index 7c05f46..0c22a9e 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll @@ -32,21 +32,7 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) { ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], 
i64 [[IV]], i32 0 -; IF-EVL-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 1 -; IF-EVL-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP12]] -; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: for.cond.cleanup: ; IF-EVL-NEXT: ret void ; @@ -156,30 +142,12 @@ define i32 @load_factor_4_with_gap(i64 %n, ptr noalias %a) { ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP12]]) -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 0 -; IF-EVL-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[RDX]], [[TMP16]] -; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr 
inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 1 -; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[TMP17]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 3 -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[ADD2]] = add nsw i32 [[ADD1]], [[TMP18]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]] ; IF-EVL: exit: -; IF-EVL-NEXT: [[ADD2_LCSSA:%.*]] = phi i32 [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ADD2_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP15]] ; ; NO-VP-LABEL: @load_factor_4_with_gap( ; NO-VP-NEXT: entry: @@ -299,22 +267,9 @@ define void @store_factor_4_with_gap(i32 %n, ptr noalias %a) { ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP6]] ; IF-EVL-NEXT: [[VEC_IND_NEXT5]] = add <vscale x 4 x i32> [[VEC_IND2]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP8:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[TMP15:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 0 -; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 1 -; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX1]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = 
getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 3 -; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[TMP15]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; @@ -427,30 +382,12 @@ define i32 @load_factor_4_with_tail_gap(i64 %n, ptr noalias %a) { ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP12]]) -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 0 -; IF-EVL-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[RDX]], [[TMP16]] -; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 1 -; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[TMP17]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 2 -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[ADD2]] = add nsw i32 
[[ADD1]], [[TMP18]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]] ; IF-EVL: exit: -; IF-EVL-NEXT: [[ADD2_LCSSA:%.*]] = phi i32 [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ADD2_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP15]] ; ; NO-VP-LABEL: @load_factor_4_with_tail_gap( ; NO-VP-NEXT: entry: @@ -571,22 +508,9 @@ define void @store_factor_4_with_tail_gap(i32 %n, ptr noalias %a) { ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP6]] ; IF-EVL-NEXT: [[VEC_IND_NEXT5]] = add <vscale x 4 x i32> [[VEC_IND2]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP8:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[TMP15:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 0 -; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 1 -; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX1]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 2 -; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[TMP15]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; @@ 
-697,33 +621,12 @@ define i32 @load_factor_4_reverse(i64 %n, ptr noalias %a) { ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]] ; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]]) -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[N]], [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ADD3:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 0 -; IF-EVL-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[RDX]], [[TMP20]] -; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 1 -; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[TMP21]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 2 -; IF-EVL-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[ADD2:%.*]] = add nsw i32 [[ADD1]], [[TMP22]] -; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 3 -; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4 -; IF-EVL-NEXT: [[ADD3]] = add nsw i32 [[ADD2]], [[TMP23]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[IV_NEXT]], 0 -; 
IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[EXIT]] ; IF-EVL: exit: -; IF-EVL-NEXT: [[ADD3_LCSSA:%.*]] = phi i32 [ [[ADD3]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ADD3_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP19]] ; ; NO-VP-LABEL: @load_factor_4_reverse( ; NO-VP-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll index 00c88a46..1aea6aa 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll @@ -26,18 +26,7 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) { ; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV1:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[IV1]] -; IF-EVL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[IV1]] -; IF-EVL-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX4]], align 4 -; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i32 [[IV1]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT1]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY1]] ; IF-EVL: for.cond.cleanup: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll index a03b430..e94e64f 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll +++ 
b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll @@ -32,17 +32,6 @@ define void @trip_count_max_1024(ptr %p, i64 %tc) vscale_range(2, 1024) { ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT_LOOPEXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]] -; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8 -; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1 -; CHECK-NEXT: store i64 [[Y]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[I_NEXT]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp uge i64 [[I_NEXT]], [[TC]] -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT_LOOPEXIT]], label %[[LOOP]] ; CHECK: [[EXIT_LOOPEXIT]]: ; CHECK-NEXT: br label %[[EXIT]] ; CHECK: [[EXIT]]: @@ -92,17 +81,6 @@ define void @overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) { ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT_LOOPEXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]] -; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8 -; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1 -; CHECK-NEXT: store i64 [[Y]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[I_NEXT]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[TC]] -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT_LOOPEXIT]], label %[[LOOP]] ; CHECK: [[EXIT_LOOPEXIT]]: ; CHECK-NEXT: br label %[[EXIT]] ; CHECK: [[EXIT]]: @@ -152,17 +130,6 @@ define void @no_overflow_at_0(ptr %p, 
i64 %tc) vscale_range(2, 1024) { ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT_LOOPEXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]] -; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8 -; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1 -; CHECK-NEXT: store i64 [[Y]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[I_NEXT]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[TC_ADD]] -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT_LOOPEXIT]], label %[[LOOP]] ; CHECK: [[EXIT_LOOPEXIT]]: ; CHECK-NEXT: br label %[[EXIT]] ; CHECK: [[EXIT]]: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll index 58b4c53..b13c671 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll @@ -30,25 +30,7 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) { ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: -; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I_011]] -; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP1:%.*]] = icmp ne i32 [[TMP23]], 0 -; IF-EVL-NEXT: br i1 [[CMP1]], label 
[[IF_THEN:%.*]], label [[FOR_INC]] -; IF-EVL: if.then: -; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I_011]] -; IF-EVL-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add i32 [[TMP23]], [[TMP24]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX3]], align 4 -; IF-EVL-NEXT: br label [[FOR_INC]] -; IF-EVL: for.inc: -; IF-EVL-NEXT: [[INC]] = add nuw nsw i64 [[I_011]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]] +; IF-EVL-NEXT: br label [[FOR_INC:%.*]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll index 6c487ab..dcb7bf4 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll @@ -29,21 +29,9 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) { ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD]] = fadd float [[TMP17]], [[SUM_07]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 
[[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[ADD_LCSSA]] +; IF-EVL-NEXT: ret float [[TMP14]] ; ; NO-VP-LABEL: @fadd( ; NO-VP-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll index e14ff7c..7179e7d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll @@ -30,21 +30,9 @@ define i32 @add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP14]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD]] = add nsw i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ADD_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP17]] ; ; NO-VP-LABEL: @add( ; NO-VP-NEXT: entry: @@ -129,7 +117,7 @@ define i32 
@mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[TMP4]] = mul <8 x i32> [[WIDE_LOAD2]], [[VEC_PHI1]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP6:%.*]] = mul <8 x i32> [[TMP4]], [[TMP5]] ; IF-EVL-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP6]]) @@ -147,7 +135,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[MUL]] = mul nsw i32 [[TMP0]], [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], [[FOR_BODY1]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[MUL_LCSSA]] @@ -233,24 +221,12 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP14]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: 
[[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[OR]] = or i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[OR_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP17]] ; ; NO-VP-LABEL: @or( ; NO-VP-NEXT: entry: @@ -332,24 +308,12 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[TMP14]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[AND]] = and i32 [[TMP18]], [[RDX]] -; 
IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[AND_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP17]] ; ; NO-VP-LABEL: @and( ; NO-VP-NEXT: entry: @@ -431,24 +395,12 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP14]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[XOR]] = xor i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 
[[XOR_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP17]] ; ; NO-VP-LABEL: @xor( ; NO-VP-NEXT: entry: @@ -532,25 +484,12 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP15]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP19]], [[RDX]] -; IF-EVL-NEXT: [[SMIN]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[SMIN_LCSSA:%.*]] = phi i32 [ [[SMIN]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[SMIN_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP18]] ; ; NO-VP-LABEL: @smin( ; NO-VP-NEXT: entry: @@ -638,25 +577,12 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] 
; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32> [[TMP15]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp sgt i32 [[TMP19]], [[RDX]] -; IF-EVL-NEXT: [[SMAX]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[SMAX_LCSSA:%.*]] = phi i32 [ [[SMAX]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[SMAX_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP18]] ; ; NO-VP-LABEL: @smax( ; NO-VP-NEXT: entry: @@ -744,25 +670,12 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; 
IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32> [[TMP15]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[TMP19]], [[RDX]] -; IF-EVL-NEXT: [[UMIN]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[UMIN_LCSSA:%.*]] = phi i32 [ [[UMIN]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[UMIN_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP18]] ; ; NO-VP-LABEL: @umin( ; NO-VP-NEXT: entry: @@ -850,25 +763,12 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[TMP15]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, 
[[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP19]], [[RDX]] -; IF-EVL-NEXT: [[UMAX]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[UMAX_LCSSA:%.*]] = phi i32 [ [[UMAX]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[UMAX_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP18]] ; ; NO-VP-LABEL: @umax( ; NO-VP-NEXT: entry: @@ -954,24 +854,12 @@ define float @fadd(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP14]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; 
IF-EVL-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD]] = fadd reassoc float [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[ADD_LCSSA]] +; IF-EVL-NEXT: ret float [[TMP17]] ; ; NO-VP-LABEL: @fadd( ; NO-VP-NEXT: entry: @@ -1056,7 +944,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP4]] = fmul reassoc <8 x float> [[WIDE_LOAD2]], [[VEC_PHI1]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP6:%.*]] = fmul reassoc <8 x float> [[TMP4]], [[TMP5]] ; IF-EVL-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v8f32(float 1.000000e+00, <8 x float> [[TMP6]]) @@ -1074,7 +962,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[TMP0]], [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP24:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP15:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL]], [[FOR_BODY1]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MUL_LCSSA]] @@ -1162,25 +1050,12 @@ 
define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP15]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP19]], [[RDX]] -; IF-EVL-NEXT: [[MIN]] = select i1 [[CMP]], float [[TMP19]], float [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[MIN_LCSSA]] +; IF-EVL-NEXT: ret float [[TMP18]] ; ; NO-VP-LABEL: @fmin( ; NO-VP-NEXT: entry: @@ -1268,25 +1143,12 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: 
br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[TMP15]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast ogt float [[TMP19]], [[RDX]] -; IF-EVL-NEXT: [[MAX]] = select i1 [[CMP]], float [[TMP19]], float [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[MAX_LCSSA]] +; IF-EVL-NEXT: ret float [[TMP18]] ; ; NO-VP-LABEL: @fmax( ; NO-VP-NEXT: entry: @@ -1375,7 +1237,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; IF-EVL: 
middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]]) @@ -1393,7 +1255,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MIN]] = tail call float @llvm.minimum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP30:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP19:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MIN_LCSSA]] @@ -1483,7 +1345,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]]) @@ -1501,7 +1363,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MAX]] = tail call float @llvm.maximum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 
[[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP32:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP21:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MAX_LCSSA]] @@ -1590,26 +1452,12 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]] ; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP20:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP17]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP21]], float [[TMP22]], float [[RDX]]) -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], 
!llvm.loop [[LOOP34:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP20]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[MULADD_LCSSA]] +; IF-EVL-NEXT: ret float [[TMP20]] ; ; NO-VP-LABEL: @fmuladd( ; NO-VP-NEXT: entry: @@ -1696,27 +1544,14 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP15]]) ; IF-EVL-NEXT: [[TMP19:%.*]] = freeze i1 [[TMP18]] ; IF-EVL-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP19]], i32 [[INV:%.*]], i32 [[START:%.*]] -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP20]], 3 -; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ 
[[RDX_SELECT]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_SELECT]] ; ; NO-VP-LABEL: @anyof_icmp( ; NO-VP-NEXT: entry: @@ -1801,27 +1636,14 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP15]]) ; IF-EVL-NEXT: [[TMP19:%.*]] = freeze i1 [[TMP18]] ; IF-EVL-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP19]], i32 [[INV:%.*]], i32 [[START:%.*]] -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = fcmp fast olt float [[TMP20]], 3.000000e+00 -; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]] +; IF-EVL-NEXT: ret i32 
[[RDX_SELECT]] ; ; NO-VP-LABEL: @anyof_fcmp( ; NO-VP-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll index 5b9bc50..e70894b 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll @@ -43,20 +43,7 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt ; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[LOOPEND:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 -; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[ADD]] -; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 -; IF-EVL-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]] -; IF-EVL-NEXT: store i32 [[TMP]], ptr [[GEPS]], align 4 -; IF-EVL-NEXT: [[INC]] = add i32 [[I]], 1 -; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024 -; IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND]] ; IF-EVL: loopend: ; IF-EVL-NEXT: ret void ; @@ -179,27 +166,7 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal ; IF-EVL-NEXT: [[TMP29:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[LOOPEND:%.*]] -; IF-EVL: scalar.ph: -; IF-EVL-NEXT: br label 
[[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ] -; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_INC]] ] -; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 -; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i32 [[I]] -; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 -; IF-EVL-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP]], 100 -; IF-EVL-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; IF-EVL: if.then: -; IF-EVL-NEXT: [[GEPL1:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i64 [[ADD]] -; IF-EVL-NEXT: [[V:%.*]] = load i32, ptr [[GEPL1]], align 4 -; IF-EVL-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]] -; IF-EVL-NEXT: store i32 [[V]], ptr [[GEPS]], align 4 -; IF-EVL-NEXT: br label [[FOR_INC]] -; IF-EVL: for.inc: -; IF-EVL-NEXT: [[INC]] = add i32 [[I]], 1 -; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024 -; IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND]] +; IF-EVL-NEXT: br label [[FOR_INC:%.*]] ; IF-EVL: loopend: ; IF-EVL-NEXT: ret void ; @@ -351,22 +318,7 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr ; IF-EVL-NEXT: [[TMP32:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[LOOP:%.*]] -; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 1024, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; IF-EVL-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[X:%.*]] = load i8, ptr [[GEP_A]], align 1 -; IF-EVL-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i8 [[X]] -; IF-EVL-NEXT: [[Y:%.*]] = load i8, ptr [[GEP_B]], align 1 -; IF-EVL-NEXT: [[GEP_C:%.*]] = 
getelementptr i8, ptr [[C]], i64 [[IV]] -; IF-EVL-NEXT: store i8 [[Y]], ptr [[GEP_C]], align 1 -; IF-EVL-NEXT: [[GEP_D:%.*]] = getelementptr i8, ptr [[D]], i64 [[IV]] -; IF-EVL-NEXT: store i8 [[Y]], ptr [[GEP_D]], align 1 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], -1 -; IF-EVL-NEXT: [[CMP_NOT:%.*]] = icmp eq i64 [[IV]], 0 -; IF-EVL-NEXT: br i1 [[CMP_NOT]], label [[EXIT]], label [[LOOP]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll index b13f97d..e1c62fe 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll @@ -31,19 +31,7 @@ define void @test(ptr %p) { ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[LOOP:%.*]] -; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 8 -; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 200 -; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 8 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; @@ -125,19 +113,7 @@ define void @test_may_clobber1(ptr %p) { ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: 
middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[LOOP:%.*]] -; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 100 -; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; @@ -157,19 +133,7 @@ define void @test_may_clobber1(ptr %p) { ; NO-VP-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; NO-VP-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; NO-VP: middle.block: -; NO-VP-NEXT: br label [[EXIT:%.*]] -; NO-VP: scalar.ph: ; NO-VP-NEXT: br label [[LOOP:%.*]] -; NO-VP: loop: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; NO-VP-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 100 -; NO-VP-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; NO-VP: exit: ; NO-VP-NEXT: ret void ; @@ -259,19 +223,7 @@ define void @test_may_clobber3(ptr %p) { ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] 
-; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[LOOP:%.*]] -; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 10 -; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; @@ -291,19 +243,7 @@ define void @test_may_clobber3(ptr %p) { ; NO-VP-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; NO-VP-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; NO-VP: middle.block: -; NO-VP-NEXT: br label [[EXIT:%.*]] -; NO-VP: scalar.ph: ; NO-VP-NEXT: br label [[LOOP:%.*]] -; NO-VP: loop: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; NO-VP-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 10 -; NO-VP-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; NO-VP: exit: ; NO-VP-NEXT: ret void ; @@ -347,19 +287,7 @@ define void @trivial_due_max_vscale(ptr %p) { ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label 
[[LOOP:%.*]] -; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 8192 -; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; @@ -446,19 +374,7 @@ define void @no_high_lmul_or_interleave(ptr %p) { ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[LOOP:%.*]] -; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 1024 -; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 3001 -; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll index 0bb7ad0..f804329 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll @@ -38,16 +38,6 @@ define 
void @lshift_significand(i32 %n, ptr nocapture writeonly %dst) { ; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[TMP22:%.*]] = sub nuw nsw i64 1, [[IV1]] -; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP22]] -; CHECK-NEXT: store i64 0, ptr [[ARRAYIDX14]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 3 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll index 5c89f21..c5319c6 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll @@ -31,20 +31,6 @@ define void @test_pr98413_zext_removed(ptr %src, ptr noalias %dst, i64 %x) { ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC]], align 8 -; CHECK-NEXT: [[EXT_L:%.*]] = zext i16 [[L]] to i64 -; CHECK-NEXT: [[AND:%.*]] = and i64 [[X]], [[EXT_L]] -; CHECK-NEXT: [[TRUNC_AND:%.*]] = 
trunc i64 [[AND]] to i8 -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i8 [[TRUNC_AND]], ptr [[GEP_DST]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 96 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -95,20 +81,6 @@ define void @test_pr98413_sext_removed(ptr %src, ptr noalias %dst, i64 %x) { ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC]], align 8 -; CHECK-NEXT: [[EXT_L:%.*]] = sext i16 [[L]] to i64 -; CHECK-NEXT: [[AND:%.*]] = and i64 [[X]], [[EXT_L]] -; CHECK-NEXT: [[TRUNC_AND:%.*]] = trunc i64 [[AND]] to i8 -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i8 [[TRUNC_AND]], ptr [[GEP_DST]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 96 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -151,21 +123,6 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 { ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[F_039:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: 
[[TMP4:%.*]] = or i8 23, [[X]] -; CHECK-NEXT: [[EXTRACT_T:%.*]] = trunc i8 [[TMP4]] to i1 -; CHECK-NEXT: br i1 [[EXTRACT_T]], label %[[THEN:.*]], label %[[LOOP_LATCH]] -; CHECK: [[THEN]]: -; CHECK-NEXT: store i8 0, ptr [[DST]], align 1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[ADD]] = add i8 [[F_039]], 1 -; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[F_039]] to i32 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV]], 8 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -260,23 +217,6 @@ define void @icmp_only_first_op_truncated(ptr noalias %dst, i32 %x, i64 %N, i64 ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[T1:%.*]] = trunc i64 [[N]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[T1]], [[T]] -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[X]] to i64 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr double, ptr [[SRC]], i64 [[IDXPROM]] -; CHECK-NEXT: [[RETVAL:%.*]] = load double, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: store double [[RETVAL]], ptr [[DST]], align 8 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[V]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll index 6efb035..000dc4a 
100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll @@ -22,20 +22,6 @@ define void @truncate_to_minimal_bitwidths_widen_cast_recipe(ptr %src) { ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP_SRC1:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV1]] -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr [[GEP_SRC1]], align 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP11]] to i32 -; CHECK-NEXT: [[MUL16:%.*]] = mul i32 0, [[CONV]] -; CHECK-NEXT: [[SHR35:%.*]] = lshr i32 [[MUL16]], 1 -; CHECK-NEXT: [[CONV36:%.*]] = trunc i32 [[SHR35]] to i8 -; CHECK-NEXT: store i8 [[CONV36]], ptr null, align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV1]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV1]], 8 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll index 9095d6e..bae97e5 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll @@ -29,16 +29,6 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6 ; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] -; SCALABLE: [[SCALAR_PH:.*]]: -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, 
%[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -97,16 +87,6 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6 ; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] -; TF-SCALABLE: [[SCALAR_PH:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 -; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -292,22 +272,6 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] -; SCALABLE: [[SCALAR_PH:.*]]: -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], 
%[[LATCH:.*]] ] -; SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10 -; SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_LOAD:.*]], label %[[LATCH]] -; SCALABLE: [[DO_LOAD]]: -; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 -; SCALABLE-NEXT: br label %[[LATCH]] -; SCALABLE: [[LATCH]]: -; SCALABLE-NEXT: [[PHI:%.*]] = phi i64 [ 0, %[[FOR_BODY]] ], [ [[V]], %[[DO_LOAD]] ] -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -389,22 +353,6 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; TF-SCALABLE-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] -; TF-SCALABLE: [[SCALAR_PH:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; TF-SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10 -; TF-SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_LOAD:.*]], label %[[LATCH]] -; TF-SCALABLE: [[DO_LOAD]]: -; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 -; TF-SCALABLE-NEXT: br label %[[LATCH]] -; TF-SCALABLE: [[LATCH]]: -; TF-SCALABLE-NEXT: [[PHI:%.*]] = phi i64 [ 0, %[[FOR_BODY]] ], [ [[V]], %[[DO_LOAD]] ] -; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 
1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -451,19 +399,9 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; SCALABLE-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] -; SCALABLE: [[SCALAR_PH:.*]]: -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 1 -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -519,19 +457,9 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; 
TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] -; TF-SCALABLE: [[SCALAR_PH:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 1 -; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -571,19 +499,9 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] -; SCALABLE: [[SCALAR_PH:.*]]: -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8 -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; 
SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -639,19 +557,9 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] -; TF-SCALABLE: [[SCALAR_PH:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8 -; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -700,19 +608,9 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]] ; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] -; SCALABLE: 
[[SCALAR_PH:.*]]: -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; SCALABLE-NEXT: store i64 [[IV]], ptr [[B]], align 8 -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -781,19 +679,9 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]] ; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] -; TF-SCALABLE: [[SCALAR_PH:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-SCALABLE-NEXT: store i64 [[IV]], ptr [[B]], align 8 -; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; 
@@ -843,24 +731,9 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] ; SCALABLE-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] -; SCALABLE: [[SCALAR_PH:.*]]: -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10 -; SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_STORE:.*]], label %[[LATCH]] -; SCALABLE: [[DO_STORE]]: -; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8 -; SCALABLE-NEXT: br label %[[LATCH]] -; SCALABLE: [[LATCH]]: -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -939,24 +812,9 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] ; TF-SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP13]], label 
%[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] -; TF-SCALABLE: [[SCALAR_PH:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; TF-SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10 -; TF-SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_STORE:.*]], label %[[LATCH]] -; TF-SCALABLE: [[DO_STORE]]: -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8 -; TF-SCALABLE-NEXT: br label %[[LATCH]] -; TF-SCALABLE: [[LATCH]]: -; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -1002,19 +860,9 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] -; SCALABLE: [[SCALAR_PH:.*]]: -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1 -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr 
inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -1070,19 +918,9 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] -; TF-SCALABLE: [[SCALAR_PH:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1 -; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll index 8c67b4c..1676461 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll +++ 
b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll @@ -15,15 +15,6 @@ define void @foo(ptr %arg) #0 { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr [3 x i64], ptr [[ARG]], i64 0, i64 [[IV]] -; CHECK-NEXT: store i64 0, ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 3 -; CHECK-NEXT: br i1 [[COND]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -61,18 +52,8 @@ define i32 @test_remove_iv(i32 %start) #0 { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP5]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[START]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED_NEXT]] = xor i32 [[RED]], 3 -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 5 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP6]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll index 649ce60..0a64723 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll +++ 
b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll @@ -30,21 +30,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP22]] -; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; IF-EVL: for.cond.cleanup: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll index b0f0c39..b106f99 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll @@ -25,11 +25,7 @@ define i32 @foo(ptr nocapture %A) { ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000 ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label 
[[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 poison ; @@ -76,11 +72,7 @@ define i32 @foo1(ptr nocapture noalias %A, ptr nocapture %PtrPtr) { ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000 ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 poison ; diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll index 1d4cbc3..78c71fd 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll @@ -38,15 +38,6 @@ define void @test_scalar_steps_target_instruction_cost(ptr %dst) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i64 [[IV]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 3 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV]], 22 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll index a423f06..02e82b4 100644 --- 
a/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll @@ -91,23 +91,7 @@ define void @test(ptr %p, i40 %a) { ; CHECK: pred.store.continue30: ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[SHL:%.*]] = shl i40 [[A]], 24 -; CHECK-NEXT: [[ASHR:%.*]] = ashr i40 [[SHL]], 28 -; CHECK-NEXT: [[TRUNC:%.*]] = trunc i40 [[ASHR]] to i32 -; CHECK-NEXT: [[ICMP_EQ:%.*]] = icmp eq i32 [[TRUNC]], 0 -; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[ICMP_EQ]] to i32 -; CHECK-NEXT: [[ICMP_ULT:%.*]] = icmp ult i32 0, [[ZEXT]] -; CHECK-NEXT: [[OR:%.*]] = or i1 [[ICMP_ULT]], true -; CHECK-NEXT: [[ICMP_SGT:%.*]] = icmp sgt i1 [[OR]], false -; CHECK-NEXT: store i1 [[ICMP_SGT]], ptr [[P]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[IV_NEXT]], 10 -; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll index 3c788b2..ee84ef2 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll @@ -63,19 +63,7 @@ define void @func_21() { ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 6 ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ 
[[LV:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[A_PTR:%.*]] = getelementptr inbounds [5 x i32], ptr @A, i64 0, i64 [[INDVARS_IV]] -; CHECK-NEXT: [[LV]] = load i32, ptr [[A_PTR]], align 4 -; CHECK-NEXT: [[B_PTR:%.*]] = getelementptr inbounds [5 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; CHECK-NEXT: store i32 [[SCALAR_RECUR]], ptr [[B_PTR]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 5 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/scalar-steps-with-users-demanding-all-lanes-and-first-lane-only.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/scalar-steps-with-users-demanding-all-lanes-and-first-lane-only.ll index d40cb6e..cfb1805 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/scalar-steps-with-users-demanding-all-lanes-and-first-lane-only.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/scalar-steps-with-users-demanding-all-lanes-and-first-lane-only.ll @@ -66,25 +66,6 @@ define void @test_scalar_iv_steps_used_by_replicate_and_first_lane_only_vpinst(p ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[MUL_IV:%.*]] = mul nsw i64 [[IV]], 4 -; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds i8, ptr [[SRC_1]], i64 [[MUL_IV]] -; CHECK-NEXT: [[L_1:%.*]] = load i8, ptr [[GEP_SRC_1]], align 1 -; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[L_1]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[IV_OR:%.*]] = or disjoint i64 
[[IV]], 4 -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds [8 x i32], ptr @src, i64 0, i64 [[IV_OR]] -; CHECK-NEXT: [[L_2:%.*]] = load i32, ptr [[GEP_SRC]], align 4 -; CHECK-NEXT: store i32 [[L_2]], ptr [[DST]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 4 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll b/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll index 9dd7e9f..f65a9d7 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll @@ -22,19 +22,7 @@ define void @f1() { ; CHECK-NEXT: store <2 x ptr> <ptr @a, ptr @a>, ptr [[TMP1]], align 8 ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[BB3:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[BB2:%.*]] -; CHECK: bb2: -; CHECK-NEXT: [[C_1_0:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[_TMP9:%.*]], [[BB2]] ] -; CHECK-NEXT: [[_TMP1:%.*]] = zext i16 0 to i64 -; CHECK-NEXT: [[_TMP2:%.*]] = getelementptr [1 x %rec8], ptr @a, i16 0, i64 [[_TMP1]] -; CHECK-NEXT: [[_TMP6:%.*]] = sext i16 [[C_1_0]] to i64 -; CHECK-NEXT: [[_TMP7:%.*]] = getelementptr [2 x ptr], ptr @b, i16 0, i64 [[_TMP6]] -; CHECK-NEXT: store ptr [[_TMP2]], ptr [[_TMP7]], align 8 -; CHECK-NEXT: [[_TMP9]] = add nsw i16 [[C_1_0]], 1 -; CHECK-NEXT: [[_TMP11:%.*]] = icmp slt i16 [[_TMP9]], 2 -; CHECK-NEXT: br i1 [[_TMP11]], label [[BB2]], label [[BB3]] ; CHECK: bb3: ; CHECK-NEXT: ret void ; @@ -102,25 +90,7 @@ define void @redundant_or_1(ptr %dst, i1 %c.0, i1 %c.1) { ; CHECK: pred.store.continue8: ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] 
-; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: br i1 [[C_1]], label [[LOOP_LATCH]], label [[THEN_1:%.*]] -; CHECK: then.1: -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2 -; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], true -; CHECK-NEXT: [[COND:%.*]] = select i1 [[OR]], i1 [[C_0]], i1 false -; CHECK-NEXT: br i1 [[COND]], label [[THEN_2:%.*]], label [[LOOP_LATCH]] -; CHECK: then.2: -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[IV]] -; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 3 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -195,25 +165,7 @@ define void @redundant_or_2(ptr %dst, i1 %c.0, i1 %c.1) { ; CHECK: pred.store.continue8: ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[THEN_1:%.*]] -; CHECK: then.1: -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2 -; CHECK-NEXT: [[OR:%.*]] = or i1 true, [[CMP]] -; CHECK-NEXT: [[COND:%.*]] = select i1 [[OR]], i1 [[C_1]], i1 false -; CHECK-NEXT: br i1 [[COND]], label [[THEN_2:%.*]], label [[LOOP_LATCH]] -; CHECK: then.2: -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[IV]] -; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 3 -; CHECK-NEXT: 
br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -289,25 +241,7 @@ define void @redundant_and_1(ptr %dst, i1 %c.0, i1 %c.1) { ; CHECK: pred.store.continue8: ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[THEN_1:%.*]] -; CHECK: then.1: -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2 -; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], false -; CHECK-NEXT: [[COND:%.*]] = select i1 [[OR]], i1 [[C_1]], i1 false -; CHECK-NEXT: br i1 [[COND]], label [[THEN_2:%.*]], label [[LOOP_LATCH]] -; CHECK: then.2: -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[IV]] -; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 3 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -341,6 +275,23 @@ exit: define void @redundant_and_2(ptr %dst, i1 %c.0, i1 %c.1) { ; CHECK-LABEL: @redundant_and_2( ; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] +; CHECK: loop.header: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] +; CHECK-NEXT: br i1 [[C_0:%.*]], label [[LOOP_LATCH]], label [[THEN_1:%.*]] +; CHECK: then.1: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2 +; CHECK-NEXT: [[OR:%.*]] = and i1 false, [[CMP]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[OR]], i1 [[C_1:%.*]], i1 false +; CHECK-NEXT: br i1 [[COND]], label [[THEN_2:%.*]], label [[LOOP_LATCH]] +; 
CHECK: then.2: +; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i32 [[IV]] +; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4 +; CHECK-NEXT: br label [[LOOP_LATCH]] +; CHECK: loop.latch: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 3 +; CHECK-NEXT: br i1 [[EC]], label [[EXIT:%.*]], label [[LOOP_HEADER]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-constant-known-via-scev.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-constant-known-via-scev.ll index ee88abb..e0dd376 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/cost-constant-known-via-scev.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/cost-constant-known-via-scev.ll @@ -92,24 +92,8 @@ define i64 @second_lshr_operand_zero_via_scev() { ; CHECK-NEXT: [[BIN_RDX:%.*]] = or <2 x i64> [[TMP11]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> [[BIN_RDX]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOPS:.*]] -; CHECK: [[LOOPS]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOPS]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOPS]] ] -; CHECK-NEXT: [[C:%.*]] = icmp eq i64 [[IV]], 0 -; CHECK-NEXT: [[AND:%.*]] = and i64 [[IV]], 0 -; CHECK-NEXT: [[TMP14:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[TMP14]], [[EXT_0]] -; CHECK-NEXT: [[CONV_1:%.*]] = zext i32 [[SHR]] to i64 -; CHECK-NEXT: [[RED_NEXT_V:%.*]] = select i1 [[C]], i64 [[AND]], i64 [[CONV_1]] -; CHECK-NEXT: [[RED_NEXT]] = or i64 [[RED_NEXT_V]], [[RED]] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOPS]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[RED_NEXT]], %[[LOOPS]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ] 
-; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 [[TMP13]] ; entry: %ext.0 = sext i8 0 to i32 diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll index 0ba885d..9453ad7 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll @@ -696,19 +696,9 @@ define i64 @live_in_known_1_via_scev() { ; CHECK-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> [[VEC_PHI]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 3, [[SCALAR_PH]] ], [ [[RED_MUL:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED_MUL]] = mul nsw i64 [[RED]], [[P_EXT]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[RED_MUL]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 [[TMP3]] ; entry: %sel = select i1 false, i32 3, i32 0 @@ -753,22 +743,9 @@ define i64 @cost_loop_invariant_recipes(i1 %x, i64 %y) { ; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> [[TMP3]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT_I_I_I:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 1, [[SCALAR_PH]] ], [ [[RED_MUL:%.*]], [[LOOP]] ] -; 
CHECK-NEXT: [[NOT_X:%.*]] = xor i1 [[X]], true -; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[NOT_X]] to i64 -; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[Y]], [[EXT]] -; CHECK-NEXT: [[RED_MUL]] = mul i64 [[SHL]], [[RED]] -; CHECK-NEXT: [[IV_NEXT_I_I_I]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[RED_MUL_LCSSA:%.*]] = phi i64 [ [[RED_MUL]], [[LOOP]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RED_MUL_LCSSA]] +; CHECK-NEXT: ret i64 [[TMP4]] ; entry: br label %loop @@ -808,20 +785,9 @@ define i32 @narrowed_reduction(ptr %a, i1 %cmp) #0 { ; CHECK: middle.block: ; CHECK-NEXT: [[TMP20:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP5]]) ; CHECK-NEXT: [[TMP21:%.*]] = zext i1 [[TMP20]] to i32 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP1:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[VEC_EPILOG_PH:%.*]] ], [ [[INC:%.*]], [[LOOP1]] ] -; CHECK-NEXT: [[OR13:%.*]] = phi i32 [ 0, [[VEC_EPILOG_PH]] ], [ [[OR:%.*]], [[LOOP1]] ] -; CHECK-NEXT: [[AND:%.*]] = and i32 [[OR13]], 1 -; CHECK-NEXT: [[OR]] = or i32 [[AND]], [[CONV]] -; CHECK-NEXT: [[INC]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 16 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP1]] ; CHECK: exit: -; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], [[LOOP1]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[OR_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP21]] ; entry: %conv = zext i1 %cmp to i32 diff --git a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll index 3d07eca..249efe1 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll +++ 
b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll @@ -39,30 +39,9 @@ define i1 @fn(ptr %nno) #0 { ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP12]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY20:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 10, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC35:%.*]] ] -; CHECK-NEXT: [[SUM_01:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[SUM_1:%.*]], [[FOR_INC35]] ] -; CHECK-NEXT: [[REM4:%.*]] = and i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i64 [[REM4]], 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[NNO]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-NEXT: br i1 [[CMP21]], label [[IF_THEN22:%.*]], label [[FOR_INC35]] -; CHECK: if.then: -; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[TMP15]], 1 -; CHECK-NEXT: [[REM27:%.*]] = urem i32 [[MUL]], 10 -; CHECK-NEXT: br label [[FOR_INC35]] -; CHECK: loop.latch: -; CHECK-NEXT: [[REM27_PN:%.*]] = phi i32 [ [[REM27]], [[IF_THEN22]] ], [ [[TMP15]], [[FOR_BODY20]] ] -; CHECK-NEXT: [[SUM_1]] = or i32 [[REM27_PN]], [[SUM_01]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1 -; CHECK-NEXT: [[CMP19_NOT:%.*]] = icmp eq i64 [[INDVARS_IV]], 0 -; CHECK-NEXT: br i1 [[CMP19_NOT]], label [[EXIT]], label [[FOR_BODY20]] +; CHECK-NEXT: br label [[FOR_INC35:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_1]], [[FOR_INC35]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[CMP41:%.*]] = icmp eq i32 [[SUM_1_LCSSA]], 0 +; CHECK-NEXT: [[CMP41:%.*]] = icmp eq i32 [[TMP14]], 0 ; CHECK-NEXT: ret i1 [[CMP41]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll 
b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll index d0c311eb..cc84fab 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll @@ -495,18 +495,7 @@ define void @test_first_order_recurrence_tried_to_scalarized(ptr %dst, i1 %c, i3 ; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ 4, [[SCALAR_PH]] ], [ [[IV]], [[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[FOR]] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds nuw i32, ptr [[DST]], i32 [[IV]] -; CHECK-NEXT: store i32 [[SUB]], ptr [[GEP_DST]], align 4 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll index 9528510..2f33e11 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll @@ -45,7 +45,8 @@ define void @foo1(ptr noalias %in, ptr noalias %out, ptr noalias %trigger, ptr n ; AVX512-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; AVX512: middle.block: ; AVX512-NEXT: br label [[FOR_END:%.*]] -; AVX512: scalar.ph: +; AVX512: for.end: +; AVX512-NEXT: ret void ; ; FVW2-LABEL: @foo1( ; FVW2-NEXT: entry: @@ -70,7 +71,8 @@ define void @foo1(ptr noalias %in, ptr noalias %out, ptr noalias 
%trigger, ptr n ; FVW2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; FVW2: middle.block: ; FVW2-NEXT: br label [[FOR_END:%.*]] -; FVW2: scalar.ph: +; FVW2: for.end: +; FVW2-NEXT: ret void ; entry: br label %for.body @@ -137,7 +139,8 @@ define void @foo2(ptr noalias %in, ptr noalias %out, ptr noalias %trigger, ptr n ; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; AVX512: middle.block: ; AVX512-NEXT: br label [[FOR_END:%.*]] -; AVX512: scalar.ph: +; AVX512: for.end: +; AVX512-NEXT: ret void ; ; FVW2-LABEL: @foo2( ; FVW2-NEXT: entry: @@ -182,7 +185,8 @@ define void @foo2(ptr noalias %in, ptr noalias %out, ptr noalias %trigger, ptr n ; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; FVW2: middle.block: ; FVW2-NEXT: br label [[FOR_END:%.*]] -; FVW2: scalar.ph: +; FVW2: for.end: +; FVW2-NEXT: ret void ; entry: br label %for.body @@ -250,7 +254,8 @@ define void @foo3(ptr noalias %in, ptr noalias %out, ptr noalias %trigger) { ; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; AVX512: middle.block: ; AVX512-NEXT: br label [[FOR_END:%.*]] -; AVX512: scalar.ph: +; AVX512: for.end: +; AVX512-NEXT: ret void ; ; FVW2-LABEL: @foo3( ; FVW2-NEXT: entry: @@ -295,7 +300,8 @@ define void @foo3(ptr noalias %in, ptr noalias %out, ptr noalias %trigger) { ; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; FVW2: middle.block: ; FVW2-NEXT: br label [[FOR_END:%.*]] -; FVW2: scalar.ph: +; FVW2: for.end: +; FVW2-NEXT: ret void ; entry: br label %for.body @@ -350,7 +356,8 @@ define void @foo2_addrspace(ptr addrspace(1) noalias %in, ptr addrspace(1) noali ; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; AVX512: middle.block: ; 
AVX512-NEXT: br label [[FOR_END:%.*]] -; AVX512: scalar.ph: +; AVX512: for.end: +; AVX512-NEXT: ret void ; ; FVW2-LABEL: @foo2_addrspace( ; FVW2-NEXT: entry: @@ -395,7 +402,8 @@ define void @foo2_addrspace(ptr addrspace(1) noalias %in, ptr addrspace(1) noali ; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; FVW2: middle.block: ; FVW2-NEXT: br label [[FOR_END:%.*]] -; FVW2: scalar.ph: +; FVW2: for.end: +; FVW2-NEXT: ret void ; entry: br label %for.body @@ -449,7 +457,8 @@ define void @foo2_addrspace2(ptr addrspace(1) noalias %in, ptr addrspace(0) noal ; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; AVX512: middle.block: ; AVX512-NEXT: br label [[FOR_END:%.*]] -; AVX512: scalar.ph: +; AVX512: for.end: +; AVX512-NEXT: ret void ; ; FVW2-LABEL: @foo2_addrspace2( ; FVW2-NEXT: entry: @@ -494,7 +503,8 @@ define void @foo2_addrspace2(ptr addrspace(1) noalias %in, ptr addrspace(0) noal ; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; FVW2: middle.block: ; FVW2-NEXT: br label [[FOR_END:%.*]] -; FVW2: scalar.ph: +; FVW2: for.end: +; FVW2-NEXT: ret void ; entry: br label %for.body @@ -548,7 +558,8 @@ define void @foo2_addrspace3(ptr addrspace(0) noalias %in, ptr addrspace(1) noal ; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; AVX512: middle.block: ; AVX512-NEXT: br label [[FOR_END:%.*]] -; AVX512: scalar.ph: +; AVX512: for.end: +; AVX512-NEXT: ret void ; ; FVW2-LABEL: @foo2_addrspace3( ; FVW2-NEXT: entry: @@ -593,7 +604,8 @@ define void @foo2_addrspace3(ptr addrspace(0) noalias %in, ptr addrspace(1) noal ; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; FVW2: middle.block: ; FVW2-NEXT: br label [[FOR_END:%.*]] -; FVW2: scalar.ph: +; FVW2: for.end: +; FVW2-NEXT: ret 
void ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll b/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll index b2d587c..877fcd4 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll @@ -90,29 +90,9 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; SSE: middle.block: ; SSE-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x double> [[PREDPHI3]], [[PREDPHI]] ; SSE-NEXT: [[TMP11:%.*]] = call fast double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> [[BIN_RDX]]) -; SSE-NEXT: br label [[DONE:%.*]] -; SSE: scalar.ph: -; SSE-NEXT: br label [[LOOP:%.*]] -; SSE: loop: -; SSE-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[NEXT_ITER:%.*]] ] -; SSE-NEXT: [[TOT:%.*]] = phi double [ 0.000000e+00, [[SCALAR_PH]] ], [ [[TOT_NEXT:%.*]], [[NEXT_ITER]] ] -; SSE-NEXT: [[ADDR:%.*]] = getelementptr double, ptr [[ARR]], i32 [[I]] -; SSE-NEXT: [[NEXTVAL:%.*]] = load double, ptr [[ADDR]], align 8 -; SSE-NEXT: [[TST:%.*]] = fcmp fast une double [[NEXTVAL]], 4.200000e+01 -; SSE-NEXT: br i1 [[TST]], label [[DO_ADD:%.*]], label [[NO_ADD:%.*]] -; SSE: do.add: -; SSE-NEXT: [[TOT_NEW:%.*]] = fadd fast double [[TOT]], [[NEXTVAL]] -; SSE-NEXT: br label [[NEXT_ITER]] -; SSE: no.add: -; SSE-NEXT: br label [[NEXT_ITER]] -; SSE: next.iter: -; SSE-NEXT: [[TOT_NEXT]] = phi double [ [[TOT]], [[NO_ADD]] ], [ [[TOT_NEW]], [[DO_ADD]] ] -; SSE-NEXT: [[I_NEXT]] = add i32 [[I]], 1 -; SSE-NEXT: [[AGAIN:%.*]] = icmp ult i32 [[I_NEXT]], 32 -; SSE-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[DONE]] +; SSE-NEXT: br label [[NEXT_ITER:%.*]] ; SSE: done: -; SSE-NEXT: [[TOT_NEXT_LCSSA:%.*]] = phi double [ [[TOT_NEXT]], [[NEXT_ITER]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] -; SSE-NEXT: ret double [[TOT_NEXT_LCSSA]] +; SSE-NEXT: ret double [[TMP11]] ; ; AVX-LABEL: @sumIfVector( ; AVX-NEXT: entry: @@ -153,29 +133,9 @@ 
define double @sumIfVector(ptr nocapture readonly %arr) { ; AVX-NEXT: [[BIN_RDX10:%.*]] = fadd fast <4 x double> [[PREDPHI8]], [[BIN_RDX]] ; AVX-NEXT: [[BIN_RDX11:%.*]] = fadd fast <4 x double> [[PREDPHI9]], [[BIN_RDX10]] ; AVX-NEXT: [[TMP21:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[BIN_RDX11]]) -; AVX-NEXT: br label [[DONE:%.*]] -; AVX: scalar.ph: -; AVX-NEXT: br label [[LOOP:%.*]] -; AVX: loop: -; AVX-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[NEXT_ITER:%.*]] ] -; AVX-NEXT: [[TOT:%.*]] = phi double [ 0.000000e+00, [[SCALAR_PH]] ], [ [[TOT_NEXT:%.*]], [[NEXT_ITER]] ] -; AVX-NEXT: [[ADDR:%.*]] = getelementptr double, ptr [[ARR]], i32 [[I]] -; AVX-NEXT: [[NEXTVAL:%.*]] = load double, ptr [[ADDR]], align 8 -; AVX-NEXT: [[TST:%.*]] = fcmp fast une double [[NEXTVAL]], 4.200000e+01 -; AVX-NEXT: br i1 [[TST]], label [[DO_ADD:%.*]], label [[NO_ADD:%.*]] -; AVX: do.add: -; AVX-NEXT: [[TOT_NEW:%.*]] = fadd fast double [[TOT]], [[NEXTVAL]] -; AVX-NEXT: br label [[NEXT_ITER]] -; AVX: no.add: -; AVX-NEXT: br label [[NEXT_ITER]] -; AVX: next.iter: -; AVX-NEXT: [[TOT_NEXT]] = phi double [ [[TOT]], [[NO_ADD]] ], [ [[TOT_NEW]], [[DO_ADD]] ] -; AVX-NEXT: [[I_NEXT]] = add i32 [[I]], 1 -; AVX-NEXT: [[AGAIN:%.*]] = icmp ult i32 [[I_NEXT]], 32 -; AVX-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[DONE]] +; AVX-NEXT: br label [[NEXT_ITER:%.*]] ; AVX: done: -; AVX-NEXT: [[TOT_NEXT_LCSSA:%.*]] = phi double [ [[TOT_NEXT]], [[NEXT_ITER]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ] -; AVX-NEXT: ret double [[TOT_NEXT_LCSSA]] +; AVX-NEXT: ret double [[TMP21]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll index 27eef01..a19b294 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll @@ -409,21 +409,9 @@ define i16 @iv_and_step_trunc() { ; 
CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <2 x i16> [[TMP2]], i32 0 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[REC_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[IV]] to i16 -; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[IV_NEXT]] to i16 -; CHECK-NEXT: [[REC_NEXT]] = mul i16 [[TMP3]], [[TMP4]] -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[REC_LCSSA:%.*]] = phi i16 [ [[SCALAR_RECUR]], [[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i16 [[REC_LCSSA]] +; CHECK-NEXT: ret i16 [[VECTOR_RECUR_EXTRACT_FOR_PHI]] ; entry: br label %loop @@ -612,16 +600,7 @@ define void @wide_iv_trunc(ptr %dst, i64 %N) { ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-NEXT: store i32 [[IV_TRUNC]], ptr [[DST]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label [[EXIT_LOOPEXIT]], label [[LOOP]] ; CHECK: exit.loopexit: ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: diff --git 
a/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll index 91c7e7a3..2f96278 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll @@ -38,36 +38,6 @@ define void @test_free_instructions_feeding_geps_for_interleave_groups(ptr noali ; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[P_INVAR]], align 4 -; CHECK-NEXT: [[IV_MUL:%.*]] = shl i64 [[IV]], 2 -; CHECK-NEXT: [[GEP_DST_19:%.*]] = getelementptr float, ptr [[DST_1]], i64 [[IV_MUL]] -; CHECK-NEXT: store float [[L_0]], ptr [[GEP_DST_19]], align 4 -; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[P_INVAR]], align 4 -; CHECK-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[IV_MUL]], 1 -; CHECK-NEXT: [[GEP_DST_119:%.*]] = getelementptr float, ptr [[DST_1]], i64 [[ADD_1]] -; CHECK-NEXT: store float [[L_1]], ptr [[GEP_DST_119]], align 4 -; CHECK-NEXT: [[ADD_2:%.*]] = or disjoint i64 [[IV_MUL]], 2 -; CHECK-NEXT: [[GEP_DST_129:%.*]] = getelementptr float, ptr [[DST_1]], i64 [[ADD_2]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_129]], align 4 -; CHECK-NEXT: [[ADD_3:%.*]] = or disjoint i64 [[IV_MUL]], 3 -; CHECK-NEXT: [[GEP_DST_140:%.*]] = getelementptr float, ptr [[DST_1]], i64 [[ADD_3]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_140]], align 4 -; CHECK-NEXT: [[L_2:%.*]] = load float, ptr [[P_INVAR]], align 4 -; CHECK-NEXT: [[GEP_DST_247:%.*]] = getelementptr float, ptr [[DST_2]], i64 [[IV_MUL]] -; CHECK-NEXT: store float [[L_2]], ptr [[GEP_DST_247]], align 4 -; CHECK-NEXT: [[GEP_DST_255:%.*]] = getelementptr float, ptr 
[[DST_2]], i64 [[ADD_1]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_255]], align 4 -; CHECK-NEXT: [[GEP_DST_265:%.*]] = getelementptr float, ptr [[DST_2]], i64 [[ADD_2]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_265]], align 4 -; CHECK-NEXT: [[GEP_DST_276:%.*]] = getelementptr float, ptr [[DST_2]], i64 [[ADD_3]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_276]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -504,17 +474,6 @@ define void @interleave_store_double_i64(ptr %dst) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[IV]], i32 1 -; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_1]], align 8 -; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_0]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -616,17 +575,6 @@ define void @interleave_store_i64_double_2(ptr %dst) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_0]], align 8 -; CHECK-NEXT: 
[[GEP_1:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[IV]], i32 1 -; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_1]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll b/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll index 228bc80..e2329fe 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll @@ -34,13 +34,9 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) { ; SSE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; SSE-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; SSE: middle.block: -; SSE-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; SSE: scalar.ph: ; SSE-NEXT: br label [[FOR_BODY:%.*]] ; SSE: for.cond.cleanup: ; SSE-NEXT: ret void -; SSE: for.body: -; SSE-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; ; AVX1-LABEL: @foo( ; AVX1-NEXT: entry: @@ -88,13 +84,9 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) { ; AVX1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; AVX1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; AVX1: middle.block: -; AVX1-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; AVX1: scalar.ph: ; AVX1-NEXT: br label [[FOR_BODY:%.*]] ; AVX1: for.cond.cleanup: ; AVX1-NEXT: ret void -; AVX1: for.body: -; AVX1-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; ; AVX2-LABEL: @foo( ; AVX2-NEXT: entry: @@ -142,13 +134,9 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) { ; AVX2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; AVX2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], 
label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; AVX2: middle.block: -; AVX2-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; AVX2: scalar.ph: ; AVX2-NEXT: br label [[FOR_BODY:%.*]] ; AVX2: for.cond.cleanup: ; AVX2-NEXT: ret void -; AVX2: for.body: -; AVX2-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; ; ATOM-LABEL: @foo( ; ATOM-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll index 5853e91..5d40e6a 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll @@ -409,16 +409,6 @@ define void @test_store_of_final_reduction_value(i64 %x, ptr %dst) { ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> [[TMP0]]) ; CHECK-NEXT: store i64 [[TMP1]], ptr [[DST]], align 8 ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV4:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED_NEXT]] = mul i64 [[RED]], [[X]] -; CHECK-NEXT: store i64 [[RED_NEXT]], ptr [[DST]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV4]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV4]], 1 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll index 9e0ef73..2a8c698 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll @@ -63,27 +63,9 @@ define i32 @test_explicit_pred(i64 %len) { ; CHECK-NEXT: [[BIN_RDX13:%.*]] = add <4 x i32> [[TMP18]], [[BIN_RDX]] ; 
CHECK-NEXT: [[BIN_RDX14:%.*]] = add <4 x i32> [[TMP19]], [[BIN_RDX13]] ; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX14]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EARLYCND:%.*]] = icmp slt i64 [[IV]], [[LEN]] -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP21]] ; entry: %alloca = alloca [4096 x i32] @@ -212,28 +194,9 @@ define i32 @test_explicit_pred_generic(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 
0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %alloca = alloca [4096 x i32] @@ -390,27 +353,9 @@ define i32 @test_invariant_address(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX7:%.*]] = add <4 x i32> [[TMP98]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <4 x i32> [[TMP99]], [[BIN_RDX7]] ; CHECK-NEXT: [[TMP101:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX8]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label 
[[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ALLOCA]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP101]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP101]] ; entry: %alloca = alloca [4096 x i32] @@ -659,28 +604,9 @@ define i32 @test_step_narrower_than_access(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX37:%.*]] = add <4 x i32> [[TMP146]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX38:%.*]] = add <4 x i32> [[TMP147]], [[BIN_RDX37]] ; CHECK-NEXT: [[TMP149:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX38]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR_I16P:%.*]] = getelementptr inbounds i16, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR_I16P]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: 
[[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP149]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP149]] ; entry: %alloca = alloca [4096 x i32] @@ -974,28 +900,9 @@ define i32 @test_non_zero_start(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1024, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 
[[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %alloca = alloca [4096 x i32] @@ -1216,28 +1123,9 @@ define i32 @test_non_unit_stride(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX7:%.*]] = add <4 x i32> [[TMP114]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <4 x i32> [[TMP115]], [[BIN_RDX7]] ; CHECK-NEXT: [[TMP117:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX8]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 2 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4093 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP117]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP117]] ; entry: %alloca = alloca [4096 x i32] @@ -1366,28 +1254,9 @@ define i32 @neg_off_by_many(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; 
CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %alloca = alloca [1024 x i32] @@ -1516,28 +1385,9 @@ define i32 @neg_off_by_one_iteration(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: 
[[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %alloca = alloca [4095 x i32] @@ -1666,28 +1516,9 @@ define i32 @neg_off_by_one_byte(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], 
label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %alloca = alloca [16383 x i8] @@ -1985,28 +1816,9 @@ define i32 @test_allocsize(i64 %len, ptr %test_base) nofree nosync { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCATION]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ 
[[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %allocation = call nonnull ptr @my_alloc(i32 16384) @@ -2136,28 +1948,9 @@ define i32 @test_allocsize_array(i64 %len, ptr %test_base) nofree nosync { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCATION]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: 
[[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %allocation = call nonnull ptr @my_array_alloc(i32 4096, i32 4) @@ -2297,28 +2090,9 @@ define i32 @test_allocsize_cond_deref(i1 %allzero, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCATION]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %allocation = call nonnull ptr @my_alloc(i32 16384) diff --git 
a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll index d0991a5..e23f8a9 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll @@ -1199,19 +1199,7 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; O1VEC2-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64 ; O1VEC2-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; O1VEC2: middle.block: -; O1VEC2-NEXT: br label [[FOR_END:%.*]] -; O1VEC2: scalar.ph: ; O1VEC2-NEXT: br label [[FOR_BODY:%.*]] -; O1VEC2: for.body: -; O1VEC2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; O1VEC2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS_IV]] -; O1VEC2-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; O1VEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[N]] -; O1VEC2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS_IV]] -; O1VEC2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 -; O1VEC2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; O1VEC2-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; O1VEC2-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]] ; O1VEC2: for.end: ; O1VEC2-NEXT: [[TMP11:%.*]] = load i32, ptr [[A]], align 4 ; O1VEC2-NEXT: ret i32 [[TMP11]] @@ -1239,19 +1227,7 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; OzVEC2-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64 ; OzVEC2-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; OzVEC2: middle.block: -; OzVEC2-NEXT: br label [[FOR_END:%.*]] -; OzVEC2: scalar.ph: ; OzVEC2-NEXT: br label [[FOR_BODY:%.*]] -; OzVEC2: for.body: -; OzVEC2-NEXT: [[INDVARS_IV:%.*]] = phi i64 
[ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; OzVEC2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS_IV]] -; OzVEC2-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OzVEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[N]] -; OzVEC2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS_IV]] -; OzVEC2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 -; OzVEC2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; OzVEC2-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; OzVEC2-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]] ; OzVEC2: for.end: ; OzVEC2-NEXT: [[TMP11:%.*]] = load i32, ptr [[A]], align 4 ; OzVEC2-NEXT: ret i32 [[TMP11]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll index fc37e5f..e1140b5 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll @@ -32,18 +32,6 @@ define i32 @foo_optsize() #0 { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0 -; CHECK-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1 -; CHECK-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[INC]] = add nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: ; 
CHECK-NEXT: ret i32 0 ; @@ -69,18 +57,6 @@ define i32 @foo_optsize() #0 { ; AUTOVF-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; AUTOVF: [[MIDDLE_BLOCK]]: ; AUTOVF-NEXT: br label %[[FOR_END:.*]] -; AUTOVF: [[SCALAR_PH:.*]]: -; AUTOVF-NEXT: br label %[[FOR_BODY:.*]] -; AUTOVF: [[FOR_BODY]]: -; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]] -; AUTOVF-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; AUTOVF-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0 -; AUTOVF-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1 -; AUTOVF-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1 -; AUTOVF-NEXT: [[INC]] = add nsw i32 [[I_08]], 1 -; AUTOVF-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202 -; AUTOVF-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; AUTOVF: [[FOR_END]]: ; AUTOVF-NEXT: ret i32 0 ; @@ -128,18 +104,6 @@ define i32 @foo_minsize() #1 { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0 -; CHECK-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1 -; CHECK-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[INC]] = add nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret i32 0 
; @@ -165,18 +129,6 @@ define i32 @foo_minsize() #1 { ; AUTOVF-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; AUTOVF: [[MIDDLE_BLOCK]]: ; AUTOVF-NEXT: br label %[[FOR_END:.*]] -; AUTOVF: [[SCALAR_PH:.*]]: -; AUTOVF-NEXT: br label %[[FOR_BODY:.*]] -; AUTOVF: [[FOR_BODY]]: -; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]] -; AUTOVF-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; AUTOVF-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0 -; AUTOVF-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1 -; AUTOVF-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1 -; AUTOVF-NEXT: [[INC]] = add nsw i32 [[I_08]], 1 -; AUTOVF-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202 -; AUTOVF-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; AUTOVF: [[FOR_END]]: ; AUTOVF-NEXT: ret i32 0 ; @@ -226,18 +178,6 @@ define void @scev4stride1(ptr noalias nocapture %a, ptr noalias nocapture readon ; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END_LOOPEXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[I_07:%.*]] = phi i32 [ [[INC:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_07]], [[K]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[MUL]] -; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_07]] -; CHECK-NEXT: store i32 [[TMP6]], ptr [[ARRAYIDX1]], align 4 -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_07]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label 
%[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]] ; CHECK: [[FOR_END_LOOPEXIT]]: ; CHECK-NEXT: ret void ; @@ -263,18 +203,6 @@ define void @scev4stride1(ptr noalias nocapture %a, ptr noalias nocapture readon ; AUTOVF-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; AUTOVF: [[MIDDLE_BLOCK]]: ; AUTOVF-NEXT: br label %[[FOR_END_LOOPEXIT:.*]] -; AUTOVF: [[SCALAR_PH:.*]]: -; AUTOVF-NEXT: br label %[[FOR_BODY:.*]] -; AUTOVF: [[FOR_BODY]]: -; AUTOVF-NEXT: [[I_07:%.*]] = phi i32 [ [[INC:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; AUTOVF-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_07]], [[K]] -; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[MUL]] -; AUTOVF-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; AUTOVF-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_07]] -; AUTOVF-NEXT: store i32 [[TMP6]], ptr [[ARRAYIDX1]], align 4 -; AUTOVF-NEXT: [[INC]] = add nuw nsw i32 [[I_07]], 1 -; AUTOVF-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 256 -; AUTOVF-NEXT: br i1 [[EXITCOND]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]] ; AUTOVF: [[FOR_END_LOOPEXIT]]: ; AUTOVF-NEXT: ret void ; @@ -431,14 +359,6 @@ define void @tail_folded_store_avx512(ptr %start, ptr %end) #3 { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 -72 -; CHECK-NEXT: store ptr null, ptr [[PTR_IV]], align 8 -; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -475,14 +395,6 @@ define void 
@tail_folded_store_avx512(ptr %start, ptr %end) #3 { ; AUTOVF-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; AUTOVF: [[MIDDLE_BLOCK]]: ; AUTOVF-NEXT: br label %[[EXIT:.*]] -; AUTOVF: [[SCALAR_PH:.*]]: -; AUTOVF-NEXT: br label %[[LOOP:.*]] -; AUTOVF: [[LOOP]]: -; AUTOVF-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ] -; AUTOVF-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 -72 -; AUTOVF-NEXT: store ptr null, ptr [[PTR_IV]], align 8 -; AUTOVF-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]] -; AUTOVF-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; AUTOVF: [[EXIT]]: ; AUTOVF-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll b/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll index 65f8487..5d76dfb 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll @@ -108,11 +108,7 @@ define void @parallel_loop(ptr nocapture %a, ptr nocapture %b) nounwind uwtable ; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512 ; CHECK-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr141968-instsimplifyfolder.ll b/llvm/test/Transforms/LoopVectorize/X86/pr141968-instsimplifyfolder.ll index 62eacf6..619693a 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr141968-instsimplifyfolder.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr141968-instsimplifyfolder.ll @@ -104,23 +104,8 @@ define i8 @pr141968(i1 %cond, i8 %v) { ; CHECK-NEXT: br i1 
[[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: br i1 [[COND]], label %[[LOOP_LATCH]], label %[[COND_FALSE:.*]] -; CHECK: [[COND_FALSE]]: -; CHECK-NEXT: [[SDIV:%.*]] = sdiv i16 [[SEXT]], [[ZEXT_TRUE]] -; CHECK-NEXT: [[SDIV_TRUNC:%.*]] = trunc i16 [[SDIV]] to i8 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[RET:%.*]] = phi i8 [ [[SDIV_TRUNC]], %[[COND_FALSE]] ], [ 0, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i8 [[IV_NEXT]], 0 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RET_LCSSA:%.*]] = phi i8 [ [[RET]], %[[LOOP_LATCH]] ], [ [[PREDPHI]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i8 [[RET_LCSSA]] +; CHECK-NEXT: ret i8 [[PREDPHI]] ; entry: %zext.true = zext i1 true to i16 diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr34438.ll b/llvm/test/Transforms/LoopVectorize/X86/pr34438.ll index 972164f..47db49c 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr34438.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr34438.ll @@ -16,26 +16,13 @@ define void @small_tc(ptr noalias nocapture %A, ptr noalias nocapture readonly % ; CHECK: vector.ph: ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP0:%.*]], align 4, !llvm.access.group [[ACC_GRP0:![0-9]+]] -; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x float>, ptr [[TMP2:%.*]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[B:%.*]], align 4, !llvm.access.group [[ACC_GRP0:![0-9]+]] +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 
x float>, ptr [[A:%.*]], align 4, !llvm.access.group [[ACC_GRP0]] ; CHECK-NEXT: [[TMP4:%.*]] = fadd fast <8 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]] -; CHECK-NEXT: store <8 x float> [[TMP4]], ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: store <8 x float> [[TMP4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP0]] ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP6]], [[TMP7]] -; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP1:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr51366-sunk-instruction-used-outside-of-loop.ll b/llvm/test/Transforms/LoopVectorize/X86/pr51366-sunk-instruction-used-outside-of-loop.ll index 0098065..e7f56a4 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr51366-sunk-instruction-used-outside-of-loop.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr51366-sunk-instruction-used-outside-of-loop.ll @@ -43,26 +43,8 @@ define ptr @test(ptr noalias %src, ptr noalias %dst) { ; CHECK-NEXT: br i1 [[TMP17]], 
label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[CMP_1:%.*]] = icmp eq i64 [[IV]], 0 -; CHECK-NEXT: br i1 [[CMP_1]], label %[[LOOP_LATCH]], label %[[THEN:.*]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[M:%.*]] = phi i32 [ [[L]], %[[THEN]] ], [ 0, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i32 [[M]], ptr [[GEP_DST]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP_2:%.*]] = icmp slt i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[CMP_2]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[GEP_LCSSA:%.*]] = phi ptr [ [[GEP_SRC]], %[[LOOP_LATCH]] ], [ [[TMP2]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret ptr [[GEP_LCSSA]] +; CHECK-NEXT: ret ptr [[TMP2]] ; entry: br label %loop.header diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll index 3922796..3616379 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll @@ -39,23 +39,7 @@ define void @test(ptr noundef align 8 dereferenceable_or_null(16) %arr) #0 { ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 12 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF0:![0-9]+]], !llvm.loop [[LOOP1:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[BB6:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label 
[[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 99, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[AND:%.*]] = and i64 [[IV]], 1 -; CHECK-NEXT: [[ICMP17:%.*]] = icmp eq i64 [[AND]], 0 -; CHECK-NEXT: br i1 [[ICMP17]], label [[BB18:%.*]], label [[LOOP_LATCH]], !prof [[PROF5:![0-9]+]] -; CHECK: bb18: -; CHECK-NEXT: [[OR:%.*]] = or disjoint i64 [[IV]], 1 -; CHECK-NEXT: [[GETELEMENTPTR19:%.*]] = getelementptr inbounds i64, ptr [[ARR]], i64 [[OR]] -; CHECK-NEXT: store i64 1, ptr [[GETELEMENTPTR19]], align 8 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; CHECK-NEXT: [[ICMP22:%.*]] = icmp eq i64 [[IV_NEXT]], 90 -; CHECK-NEXT: br i1 [[ICMP22]], label [[BB6]], label [[LOOP_HEADER]], !prof [[PROF6:![0-9]+]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: bb6: ; CHECK-NEXT: ret void ; @@ -99,6 +83,4 @@ attributes #0 = {"target-cpu"="haswell" "target-features"="+avx2" } ; CHECK: [[META2]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK: [[META4]] = !{!"llvm.loop.estimated_trip_count", i32 24} -; CHECK: [[PROF5]] = !{!"branch_weights", i32 1, i32 1} -; CHECK: [[PROF6]] = !{!"branch_weights", i32 1, i32 95} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll b/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll index 2bc3a97..f066000 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll @@ -71,23 +71,11 @@ define float @reduction_sum_float_fastmath(i32 %n, ptr %array) { ; CHECK: middle.block: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[TMP7]], [[TMP6]] ; CHECK-NEXT: [[TMP9:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[BIN_RDX]]) -; CHECK-NEXT: br label [[LOOP_EXIT_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ [[IDX_INC:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi float [ [[SUM_INC:%.*]], [[LOOP]] ], [ 0.000000e+00, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ADDRESS:%.*]] = getelementptr float, ptr [[ARRAY]], i32 [[IDX]] -; CHECK-NEXT: [[VALUE:%.*]] = load float, ptr [[ADDRESS]], align 4 -; CHECK-NEXT: [[SUM_INC]] = fadd fast float [[SUM]], [[VALUE]] -; CHECK-NEXT: [[IDX_INC]] = add i32 [[IDX]], 1 -; CHECK-NEXT: [[BE_COND:%.*]] = icmp ne i32 [[IDX_INC]], 4096 -; CHECK-NEXT: br i1 [[BE_COND]], label [[LOOP]], label [[LOOP_EXIT_LOOPEXIT]] ; CHECK: loop.exit.loopexit: -; CHECK-NEXT: [[SUM_INC_LCSSA:%.*]] = phi float [ [[SUM_INC]], [[LOOP]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[LOOP_EXIT]] ; CHECK: loop.exit: -; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[SUM_INC_LCSSA]], [[LOOP_EXIT_LOOPEXIT]] ] +; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP9]], [[LOOP]] ] ; CHECK-NEXT: ret float [[SUM_LCSSA]] ; entry: @@ -134,23 +122,11 @@ define float @reduction_sum_float_only_reassoc(i32 %n, ptr %array) { ; CHECK: middle.block: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <4 x float> [[TMP7]], [[TMP6]] ; CHECK-NEXT: 
[[TMP9:%.*]] = call reassoc float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[BIN_RDX]]) -; CHECK-NEXT: br label [[LOOP_EXIT_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ [[IDX_INC:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi float [ [[SUM_INC:%.*]], [[LOOP]] ], [ -0.000000e+00, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ADDRESS:%.*]] = getelementptr float, ptr [[ARRAY]], i32 [[IDX]] -; CHECK-NEXT: [[VALUE:%.*]] = load float, ptr [[ADDRESS]], align 4 -; CHECK-NEXT: [[SUM_INC]] = fadd reassoc float [[SUM]], [[VALUE]] -; CHECK-NEXT: [[IDX_INC]] = add i32 [[IDX]], 1 -; CHECK-NEXT: [[BE_COND:%.*]] = icmp ne i32 [[IDX_INC]], 4096 -; CHECK-NEXT: br i1 [[BE_COND]], label [[LOOP]], label [[LOOP_EXIT_LOOPEXIT]] ; CHECK: loop.exit.loopexit: -; CHECK-NEXT: [[SUM_INC_LCSSA:%.*]] = phi float [ [[SUM_INC]], [[LOOP]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[LOOP_EXIT]] ; CHECK: loop.exit: -; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ -0.000000e+00, [[ENTRY:%.*]] ], [ [[SUM_INC_LCSSA]], [[LOOP_EXIT_LOOPEXIT]] ] +; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ -0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP9]], [[LOOP]] ] ; CHECK-NEXT: ret float [[SUM_LCSSA]] ; entry: @@ -197,23 +173,11 @@ define float @reduction_sum_float_only_reassoc_and_contract(i32 %n, ptr %array) ; CHECK: middle.block: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc contract <4 x float> [[TMP7]], [[TMP6]] ; CHECK-NEXT: [[TMP9:%.*]] = call reassoc contract float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[BIN_RDX]]) -; CHECK-NEXT: br label [[LOOP_EXIT_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ [[IDX_INC:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi float [ [[SUM_INC:%.*]], [[LOOP]] ], [ -0.000000e+00, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ADDRESS:%.*]] 
= getelementptr float, ptr [[ARRAY]], i32 [[IDX]] -; CHECK-NEXT: [[VALUE:%.*]] = load float, ptr [[ADDRESS]], align 4 -; CHECK-NEXT: [[SUM_INC]] = fadd reassoc contract float [[SUM]], [[VALUE]] -; CHECK-NEXT: [[IDX_INC]] = add i32 [[IDX]], 1 -; CHECK-NEXT: [[BE_COND:%.*]] = icmp ne i32 [[IDX_INC]], 4096 -; CHECK-NEXT: br i1 [[BE_COND]], label [[LOOP]], label [[LOOP_EXIT_LOOPEXIT]] ; CHECK: loop.exit.loopexit: -; CHECK-NEXT: [[SUM_INC_LCSSA:%.*]] = phi float [ [[SUM_INC]], [[LOOP]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[LOOP_EXIT]] ; CHECK: loop.exit: -; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ -0.000000e+00, [[ENTRY:%.*]] ], [ [[SUM_INC_LCSSA]], [[LOOP_EXIT_LOOPEXIT]] ] +; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ -0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP9]], [[LOOP]] ] ; CHECK-NEXT: ret float [[SUM_LCSSA]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll b/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll index 90f3df5..70b05ac 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll @@ -50,23 +50,6 @@ define void @smax_call_uniform(ptr %dst, i64 %x) { ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: [[REM1:%.*]] = urem i64 [[MUL]], [[X]] -; CHECK-NEXT: [[SMAX:%.*]] = tail call i64 @llvm.smax.i64(i64 [[REM1]], i64 0) -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1, %[[LOOP_HEADER]] ], [ [[SMAX]], %[[ELSE]] ] -; 
CHECK-NEXT: [[IV_NEXT:%.*]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IV_NEXT]] -; CHECK-NEXT: store i64 0, ptr [[GEP1]], align 8 -; CHECK-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT1]], 1024 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll new file mode 100644 index 0000000..8784873 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll @@ -0,0 +1,460 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph:" --version 6 +; RUN: opt -p loop-vectorize -mtriple=x86_64-linux-gnu -S %s | FileCheck --check-prefix=I64 %s +; RUN: opt -p loop-vectorize -mtriple=i386-pc-linux-gnu -S %s | FileCheck --check-prefix=I32 %s + + +define void @test_store_initially_interleave(i32 %n, ptr noalias %src) #0 { +; I64-LABEL: define void @test_store_initially_interleave( +; I64-SAME: i32 [[N:%.*]], ptr noalias [[SRC:%.*]]) #[[ATTR0:[0-9]+]] { +; I64-NEXT: [[ITER_CHECK:.*:]] +; I64-NEXT: [[TMP4:%.*]] = add i32 [[N]], 1 +; I64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP4]], 4 +; I64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] +; I64: [[VECTOR_SCEVCHECK]]: +; I64-NEXT: [[TMP1:%.*]] = icmp slt i32 [[N]], 0 +; I64-NEXT: br i1 [[TMP1]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; I64: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: +; I64-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ule i32 [[TMP4]], 16 +; I64-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] +; I64: [[VECTOR_PH]]: +; I64-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP4]], 16 +; I64-NEXT: [[TMP2:%.*]] 
= icmp eq i32 [[N_MOD_VF]], 0 +; I64-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 16, i32 [[N_MOD_VF]] +; I64-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP4]], [[TMP3]] +; I64-NEXT: br label %[[VECTOR_BODY:.*]] +; I64: [[VECTOR_BODY]]: +; I64-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; I64-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; I64-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4) +; I64-NEXT: [[STEP_ADD_2:%.*]] = add <4 x i32> [[STEP_ADD]], splat (i32 4) +; I64-NEXT: [[STEP_ADD_3:%.*]] = add <4 x i32> [[STEP_ADD_2]], splat (i32 4) +; I64-NEXT: [[IV:%.*]] = add i32 [[INDEX]], 0 +; I64-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1 +; I64-NEXT: [[TMP6:%.*]] = add i32 [[INDEX]], 2 +; I64-NEXT: [[TMP7:%.*]] = add i32 [[INDEX]], 3 +; I64-NEXT: [[TMP8:%.*]] = add i32 [[INDEX]], 4 +; I64-NEXT: [[TMP9:%.*]] = add i32 [[INDEX]], 5 +; I64-NEXT: [[TMP10:%.*]] = add i32 [[INDEX]], 6 +; I64-NEXT: [[TMP11:%.*]] = add i32 [[INDEX]], 7 +; I64-NEXT: [[TMP12:%.*]] = add i32 [[INDEX]], 8 +; I64-NEXT: [[TMP13:%.*]] = add i32 [[INDEX]], 9 +; I64-NEXT: [[TMP14:%.*]] = add i32 [[INDEX]], 10 +; I64-NEXT: [[TMP15:%.*]] = add i32 [[INDEX]], 11 +; I64-NEXT: [[TMP16:%.*]] = add i32 [[INDEX]], 12 +; I64-NEXT: [[TMP17:%.*]] = add i32 [[INDEX]], 13 +; I64-NEXT: [[TMP18:%.*]] = add i32 [[INDEX]], 14 +; I64-NEXT: [[TMP19:%.*]] = add i32 [[INDEX]], 15 +; I64-NEXT: [[TMP20:%.*]] = uitofp <4 x i32> [[VEC_IND]] to <4 x double> +; I64-NEXT: [[TMP21:%.*]] = uitofp <4 x i32> [[STEP_ADD]] to <4 x double> +; I64-NEXT: [[TMP22:%.*]] = uitofp <4 x i32> [[STEP_ADD_2]] to <4 x double> +; I64-NEXT: [[TMP23:%.*]] = uitofp <4 x i32> [[STEP_ADD_3]] to <4 x double> +; I64-NEXT: [[ADD_PTR_I:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[IV]] +; I64-NEXT: [[TMP25:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP5]] +; I64-NEXT: 
[[TMP26:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP6]] +; I64-NEXT: [[TMP27:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP7]] +; I64-NEXT: [[TMP28:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP8]] +; I64-NEXT: [[TMP29:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP9]] +; I64-NEXT: [[TMP30:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP10]] +; I64-NEXT: [[TMP31:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP11]] +; I64-NEXT: [[TMP32:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP12]] +; I64-NEXT: [[TMP33:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP13]] +; I64-NEXT: [[TMP34:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP14]] +; I64-NEXT: [[TMP35:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP15]] +; I64-NEXT: [[TMP36:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP16]] +; I64-NEXT: [[TMP37:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP17]] +; I64-NEXT: [[TMP38:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP18]] +; I64-NEXT: [[TMP39:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP19]] +; I64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADD_PTR_I]], align 4 +; I64-NEXT: [[TMP41:%.*]] = load ptr, ptr [[TMP25]], align 4 +; I64-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP26]], align 4 +; I64-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP27]], align 4 +; I64-NEXT: [[TMP44:%.*]] = load ptr, ptr [[TMP28]], align 4 +; I64-NEXT: [[TMP45:%.*]] = load ptr, ptr [[TMP29]], align 4 +; I64-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP30]], align 4 +; I64-NEXT: [[TMP47:%.*]] = load ptr, ptr [[TMP31]], align 4 +; I64-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP32]], align 4 +; I64-NEXT: [[TMP49:%.*]] = load ptr, ptr [[TMP33]], align 4 +; I64-NEXT: [[TMP50:%.*]] = load ptr, ptr [[TMP34]], align 4 +; I64-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP35]], 
align 4 +; I64-NEXT: [[TMP52:%.*]] = load ptr, ptr [[TMP36]], align 4 +; I64-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP37]], align 4 +; I64-NEXT: [[TMP54:%.*]] = load ptr, ptr [[TMP38]], align 4 +; I64-NEXT: [[TMP55:%.*]] = load ptr, ptr [[TMP39]], align 4 +; I64-NEXT: [[CONV:%.*]] = extractelement <4 x double> [[TMP20]], i32 0 +; I64-NEXT: store double [[CONV]], ptr [[TMP0]], align 4 +; I64-NEXT: [[TMP57:%.*]] = extractelement <4 x double> [[TMP20]], i32 1 +; I64-NEXT: store double [[TMP57]], ptr [[TMP41]], align 4 +; I64-NEXT: [[TMP58:%.*]] = extractelement <4 x double> [[TMP20]], i32 2 +; I64-NEXT: store double [[TMP58]], ptr [[TMP42]], align 4 +; I64-NEXT: [[TMP59:%.*]] = extractelement <4 x double> [[TMP20]], i32 3 +; I64-NEXT: store double [[TMP59]], ptr [[TMP43]], align 4 +; I64-NEXT: [[TMP60:%.*]] = extractelement <4 x double> [[TMP21]], i32 0 +; I64-NEXT: store double [[TMP60]], ptr [[TMP44]], align 4 +; I64-NEXT: [[TMP61:%.*]] = extractelement <4 x double> [[TMP21]], i32 1 +; I64-NEXT: store double [[TMP61]], ptr [[TMP45]], align 4 +; I64-NEXT: [[TMP62:%.*]] = extractelement <4 x double> [[TMP21]], i32 2 +; I64-NEXT: store double [[TMP62]], ptr [[TMP46]], align 4 +; I64-NEXT: [[TMP63:%.*]] = extractelement <4 x double> [[TMP21]], i32 3 +; I64-NEXT: store double [[TMP63]], ptr [[TMP47]], align 4 +; I64-NEXT: [[TMP64:%.*]] = extractelement <4 x double> [[TMP22]], i32 0 +; I64-NEXT: store double [[TMP64]], ptr [[TMP48]], align 4 +; I64-NEXT: [[TMP65:%.*]] = extractelement <4 x double> [[TMP22]], i32 1 +; I64-NEXT: store double [[TMP65]], ptr [[TMP49]], align 4 +; I64-NEXT: [[TMP66:%.*]] = extractelement <4 x double> [[TMP22]], i32 2 +; I64-NEXT: store double [[TMP66]], ptr [[TMP50]], align 4 +; I64-NEXT: [[TMP67:%.*]] = extractelement <4 x double> [[TMP22]], i32 3 +; I64-NEXT: store double [[TMP67]], ptr [[TMP51]], align 4 +; I64-NEXT: [[TMP68:%.*]] = extractelement <4 x double> [[TMP23]], i32 0 +; I64-NEXT: store double [[TMP68]], ptr [[TMP52]], align 4 +; 
I64-NEXT: [[TMP69:%.*]] = extractelement <4 x double> [[TMP23]], i32 1 +; I64-NEXT: store double [[TMP69]], ptr [[TMP53]], align 4 +; I64-NEXT: [[TMP70:%.*]] = extractelement <4 x double> [[TMP23]], i32 2 +; I64-NEXT: store double [[TMP70]], ptr [[TMP54]], align 4 +; I64-NEXT: [[TMP71:%.*]] = extractelement <4 x double> [[TMP23]], i32 3 +; I64-NEXT: store double [[TMP71]], ptr [[TMP55]], align 4 +; I64-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 +; I64-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[STEP_ADD_3]], splat (i32 4) +; I64-NEXT: [[TMP72:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; I64-NEXT: br i1 [[TMP72]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; I64: [[MIDDLE_BLOCK]]: +; I64-NEXT: br label %[[VEC_EPILOG_ITER_CHECK:.*]] +; I64: [[VEC_EPILOG_ITER_CHECK]]: +; I64-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP3]], 4 +; I64-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] +; I64: [[VEC_EPILOG_PH]]: +; I64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; I64-NEXT: [[N_MOD_VF2:%.*]] = urem i32 [[TMP4]], 4 +; I64-NEXT: [[TMP73:%.*]] = icmp eq i32 [[N_MOD_VF2]], 0 +; I64-NEXT: [[TMP74:%.*]] = select i1 [[TMP73]], i32 4, i32 [[N_MOD_VF2]] +; I64-NEXT: [[N_VEC3:%.*]] = sub i32 [[TMP4]], [[TMP74]] +; I64-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[BC_RESUME_VAL]], i64 0 +; I64-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; I64-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3> +; I64-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; I64: [[VEC_EPILOG_VECTOR_BODY]]: +; I64-NEXT: [[INDEX4:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; I64-NEXT: 
[[VEC_IND5:%.*]] = phi <4 x i32> [ [[INDUCTION]], %[[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT7:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; I64-NEXT: [[TMP75:%.*]] = add i32 [[INDEX4]], 0 +; I64-NEXT: [[TMP76:%.*]] = add i32 [[INDEX4]], 1 +; I64-NEXT: [[TMP77:%.*]] = add i32 [[INDEX4]], 2 +; I64-NEXT: [[TMP78:%.*]] = add i32 [[INDEX4]], 3 +; I64-NEXT: [[TMP79:%.*]] = uitofp <4 x i32> [[VEC_IND5]] to <4 x double> +; I64-NEXT: [[TMP80:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP75]] +; I64-NEXT: [[TMP81:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP76]] +; I64-NEXT: [[TMP82:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP77]] +; I64-NEXT: [[TMP83:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP78]] +; I64-NEXT: [[TMP84:%.*]] = load ptr, ptr [[TMP80]], align 4 +; I64-NEXT: [[TMP85:%.*]] = load ptr, ptr [[TMP81]], align 4 +; I64-NEXT: [[TMP86:%.*]] = load ptr, ptr [[TMP82]], align 4 +; I64-NEXT: [[TMP87:%.*]] = load ptr, ptr [[TMP83]], align 4 +; I64-NEXT: [[TMP88:%.*]] = extractelement <4 x double> [[TMP79]], i32 0 +; I64-NEXT: store double [[TMP88]], ptr [[TMP84]], align 4 +; I64-NEXT: [[TMP89:%.*]] = extractelement <4 x double> [[TMP79]], i32 1 +; I64-NEXT: store double [[TMP89]], ptr [[TMP85]], align 4 +; I64-NEXT: [[TMP90:%.*]] = extractelement <4 x double> [[TMP79]], i32 2 +; I64-NEXT: store double [[TMP90]], ptr [[TMP86]], align 4 +; I64-NEXT: [[TMP91:%.*]] = extractelement <4 x double> [[TMP79]], i32 3 +; I64-NEXT: store double [[TMP91]], ptr [[TMP87]], align 4 +; I64-NEXT: [[INDEX_NEXT6]] = add nuw i32 [[INDEX4]], 4 +; I64-NEXT: [[VEC_IND_NEXT7]] = add <4 x i32> [[VEC_IND5]], splat (i32 4) +; I64-NEXT: [[TMP92:%.*]] = icmp eq i32 [[INDEX_NEXT6]], [[N_VEC3]] +; I64-NEXT: br i1 [[TMP92]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; I64: [[VEC_EPILOG_MIDDLE_BLOCK]]: +; I64-NEXT: br label %[[VEC_EPILOG_SCALAR_PH]] +; I64: 
[[VEC_EPILOG_SCALAR_PH]]: +; +; I32-LABEL: define void @test_store_initially_interleave( +; I32-SAME: i32 [[N:%.*]], ptr noalias [[SRC:%.*]]) #[[ATTR0:[0-9]+]] { +; I32-NEXT: [[ENTRY:.*:]] +; I32-NEXT: [[TMP0:%.*]] = add i32 [[N]], 1 +; I32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP0]], 4 +; I32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; I32: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: +; I32-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ule i32 [[TMP0]], 16 +; I32-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] +; I32: [[VECTOR_PH]]: +; I32-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 16 +; I32-NEXT: [[TMP1:%.*]] = icmp eq i32 [[N_MOD_VF]], 0 +; I32-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 16, i32 [[N_MOD_VF]] +; I32-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP2]] +; I32-NEXT: br label %[[VECTOR_BODY:.*]] +; I32: [[VECTOR_BODY]]: +; I32-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; I32-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; I32-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4) +; I32-NEXT: [[STEP_ADD_2:%.*]] = add <4 x i32> [[STEP_ADD]], splat (i32 4) +; I32-NEXT: [[STEP_ADD_3:%.*]] = add <4 x i32> [[STEP_ADD_2]], splat (i32 4) +; I32-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0 +; I32-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 1 +; I32-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 2 +; I32-NEXT: [[TMP6:%.*]] = add i32 [[INDEX]], 3 +; I32-NEXT: [[TMP7:%.*]] = add i32 [[INDEX]], 4 +; I32-NEXT: [[TMP8:%.*]] = add i32 [[INDEX]], 5 +; I32-NEXT: [[TMP9:%.*]] = add i32 [[INDEX]], 6 +; I32-NEXT: [[TMP10:%.*]] = add i32 [[INDEX]], 7 +; I32-NEXT: [[TMP11:%.*]] = add i32 [[INDEX]], 8 +; I32-NEXT: [[TMP12:%.*]] = add i32 [[INDEX]], 9 +; I32-NEXT: [[TMP13:%.*]] = add i32 [[INDEX]], 10 +; I32-NEXT: [[TMP14:%.*]] = add i32 
[[INDEX]], 11 +; I32-NEXT: [[TMP40:%.*]] = add i32 [[INDEX]], 12 +; I32-NEXT: [[TMP41:%.*]] = add i32 [[INDEX]], 13 +; I32-NEXT: [[TMP42:%.*]] = add i32 [[INDEX]], 14 +; I32-NEXT: [[TMP43:%.*]] = add i32 [[INDEX]], 15 +; I32-NEXT: [[TMP44:%.*]] = uitofp <4 x i32> [[VEC_IND]] to <4 x double> +; I32-NEXT: [[TMP45:%.*]] = uitofp <4 x i32> [[STEP_ADD]] to <4 x double> +; I32-NEXT: [[TMP46:%.*]] = uitofp <4 x i32> [[STEP_ADD_2]] to <4 x double> +; I32-NEXT: [[TMP55:%.*]] = uitofp <4 x i32> [[STEP_ADD_3]] to <4 x double> +; I32-NEXT: [[TMP15:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP3]] +; I32-NEXT: [[TMP16:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP4]] +; I32-NEXT: [[TMP17:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP5]] +; I32-NEXT: [[TMP18:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP6]] +; I32-NEXT: [[TMP19:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP7]] +; I32-NEXT: [[TMP20:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP8]] +; I32-NEXT: [[TMP21:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP9]] +; I32-NEXT: [[TMP22:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP10]] +; I32-NEXT: [[TMP56:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP11]] +; I32-NEXT: [[TMP57:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP12]] +; I32-NEXT: [[TMP58:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP13]] +; I32-NEXT: [[TMP59:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP14]] +; I32-NEXT: [[TMP60:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP40]] +; I32-NEXT: [[TMP61:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP41]] +; I32-NEXT: [[TMP62:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP42]] +; I32-NEXT: [[TMP71:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP43]] +; I32-NEXT: [[TMP23:%.*]] = 
load ptr, ptr [[TMP15]], align 4 +; I32-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP16]], align 4 +; I32-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP17]], align 4 +; I32-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP18]], align 4 +; I32-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP19]], align 4 +; I32-NEXT: [[TMP28:%.*]] = load ptr, ptr [[TMP20]], align 4 +; I32-NEXT: [[TMP29:%.*]] = load ptr, ptr [[TMP21]], align 4 +; I32-NEXT: [[TMP30:%.*]] = load ptr, ptr [[TMP22]], align 4 +; I32-NEXT: [[TMP47:%.*]] = load ptr, ptr [[TMP56]], align 4 +; I32-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP57]], align 4 +; I32-NEXT: [[TMP49:%.*]] = load ptr, ptr [[TMP58]], align 4 +; I32-NEXT: [[TMP50:%.*]] = load ptr, ptr [[TMP59]], align 4 +; I32-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP60]], align 4 +; I32-NEXT: [[TMP52:%.*]] = load ptr, ptr [[TMP61]], align 4 +; I32-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP62]], align 4 +; I32-NEXT: [[TMP54:%.*]] = load ptr, ptr [[TMP71]], align 4 +; I32-NEXT: [[TMP31:%.*]] = extractelement <4 x double> [[TMP44]], i32 0 +; I32-NEXT: store double [[TMP31]], ptr [[TMP23]], align 4 +; I32-NEXT: [[TMP32:%.*]] = extractelement <4 x double> [[TMP44]], i32 1 +; I32-NEXT: store double [[TMP32]], ptr [[TMP24]], align 4 +; I32-NEXT: [[TMP33:%.*]] = extractelement <4 x double> [[TMP44]], i32 2 +; I32-NEXT: store double [[TMP33]], ptr [[TMP25]], align 4 +; I32-NEXT: [[TMP34:%.*]] = extractelement <4 x double> [[TMP44]], i32 3 +; I32-NEXT: store double [[TMP34]], ptr [[TMP26]], align 4 +; I32-NEXT: [[TMP35:%.*]] = extractelement <4 x double> [[TMP45]], i32 0 +; I32-NEXT: store double [[TMP35]], ptr [[TMP27]], align 4 +; I32-NEXT: [[TMP36:%.*]] = extractelement <4 x double> [[TMP45]], i32 1 +; I32-NEXT: store double [[TMP36]], ptr [[TMP28]], align 4 +; I32-NEXT: [[TMP37:%.*]] = extractelement <4 x double> [[TMP45]], i32 2 +; I32-NEXT: store double [[TMP37]], ptr [[TMP29]], align 4 +; I32-NEXT: [[TMP38:%.*]] = extractelement <4 x double> [[TMP45]], i32 3 +; I32-NEXT: store 
double [[TMP38]], ptr [[TMP30]], align 4 +; I32-NEXT: [[TMP63:%.*]] = extractelement <4 x double> [[TMP46]], i32 0 +; I32-NEXT: store double [[TMP63]], ptr [[TMP47]], align 4 +; I32-NEXT: [[TMP64:%.*]] = extractelement <4 x double> [[TMP46]], i32 1 +; I32-NEXT: store double [[TMP64]], ptr [[TMP48]], align 4 +; I32-NEXT: [[TMP65:%.*]] = extractelement <4 x double> [[TMP46]], i32 2 +; I32-NEXT: store double [[TMP65]], ptr [[TMP49]], align 4 +; I32-NEXT: [[TMP66:%.*]] = extractelement <4 x double> [[TMP46]], i32 3 +; I32-NEXT: store double [[TMP66]], ptr [[TMP50]], align 4 +; I32-NEXT: [[TMP67:%.*]] = extractelement <4 x double> [[TMP55]], i32 0 +; I32-NEXT: store double [[TMP67]], ptr [[TMP51]], align 4 +; I32-NEXT: [[TMP68:%.*]] = extractelement <4 x double> [[TMP55]], i32 1 +; I32-NEXT: store double [[TMP68]], ptr [[TMP52]], align 4 +; I32-NEXT: [[TMP69:%.*]] = extractelement <4 x double> [[TMP55]], i32 2 +; I32-NEXT: store double [[TMP69]], ptr [[TMP53]], align 4 +; I32-NEXT: [[TMP70:%.*]] = extractelement <4 x double> [[TMP55]], i32 3 +; I32-NEXT: store double [[TMP70]], ptr [[TMP54]], align 4 +; I32-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 +; I32-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[STEP_ADD_3]], splat (i32 4) +; I32-NEXT: [[TMP39:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; I32-NEXT: br i1 [[TMP39]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; I32: [[MIDDLE_BLOCK]]: +; I32-NEXT: br label %[[VEC_EPILOG_ITER_CHECK:.*]] +; I32: [[VEC_EPILOG_ITER_CHECK]]: +; I32-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP2]], 4 +; I32-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] +; I32: [[VEC_EPILOG_PH]]: +; I32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; I32-NEXT: [[N_MOD_VF2:%.*]] = urem i32 [[TMP0]], 4 +; I32-NEXT: [[TMP72:%.*]] = icmp eq i32 
[[N_MOD_VF2]], 0 +; I32-NEXT: [[TMP73:%.*]] = select i1 [[TMP72]], i32 4, i32 [[N_MOD_VF2]] +; I32-NEXT: [[N_VEC3:%.*]] = sub i32 [[TMP0]], [[TMP73]] +; I32-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[BC_RESUME_VAL]], i64 0 +; I32-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; I32-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3> +; I32-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; I32: [[VEC_EPILOG_VECTOR_BODY]]: +; I32-NEXT: [[INDEX4:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; I32-NEXT: [[VEC_IND5:%.*]] = phi <4 x i32> [ [[INDUCTION]], %[[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT7:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; I32-NEXT: [[TMP74:%.*]] = add i32 [[INDEX4]], 0 +; I32-NEXT: [[TMP75:%.*]] = add i32 [[INDEX4]], 1 +; I32-NEXT: [[TMP76:%.*]] = add i32 [[INDEX4]], 2 +; I32-NEXT: [[TMP77:%.*]] = add i32 [[INDEX4]], 3 +; I32-NEXT: [[TMP78:%.*]] = uitofp <4 x i32> [[VEC_IND5]] to <4 x double> +; I32-NEXT: [[TMP79:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP74]] +; I32-NEXT: [[TMP80:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP75]] +; I32-NEXT: [[TMP81:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP76]] +; I32-NEXT: [[TMP82:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP77]] +; I32-NEXT: [[TMP83:%.*]] = load ptr, ptr [[TMP79]], align 4 +; I32-NEXT: [[TMP84:%.*]] = load ptr, ptr [[TMP80]], align 4 +; I32-NEXT: [[TMP85:%.*]] = load ptr, ptr [[TMP81]], align 4 +; I32-NEXT: [[TMP86:%.*]] = load ptr, ptr [[TMP82]], align 4 +; I32-NEXT: [[TMP87:%.*]] = extractelement <4 x double> [[TMP78]], i32 0 +; I32-NEXT: store double [[TMP87]], ptr [[TMP83]], align 4 +; I32-NEXT: [[TMP88:%.*]] = extractelement <4 x double> [[TMP78]], i32 1 +; I32-NEXT: store double [[TMP88]], ptr 
[[TMP84]], align 4 +; I32-NEXT: [[TMP89:%.*]] = extractelement <4 x double> [[TMP78]], i32 2 +; I32-NEXT: store double [[TMP89]], ptr [[TMP85]], align 4 +; I32-NEXT: [[TMP90:%.*]] = extractelement <4 x double> [[TMP78]], i32 3 +; I32-NEXT: store double [[TMP90]], ptr [[TMP86]], align 4 +; I32-NEXT: [[INDEX_NEXT6]] = add nuw i32 [[INDEX4]], 4 +; I32-NEXT: [[VEC_IND_NEXT7]] = add <4 x i32> [[VEC_IND5]], splat (i32 4) +; I32-NEXT: [[TMP91:%.*]] = icmp eq i32 [[INDEX_NEXT6]], [[N_VEC3]] +; I32-NEXT: br i1 [[TMP91]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; I32: [[VEC_EPILOG_MIDDLE_BLOCK]]: +; I32-NEXT: br label %[[VEC_EPILOG_SCALAR_PH]] +; I32: [[VEC_EPILOG_SCALAR_PH]]: +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %inc, %loop ] + %conv = uitofp i32 %iv to double + %add.ptr.i = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 %iv + %0 = load ptr, ptr %add.ptr.i, align 4 + store double %conv, ptr %0, align 4 + %inc = add i32 %iv, 1 + %ec = icmp eq i32 %iv, %n + br i1 %ec, label %exit, label %loop + +exit: ; preds = %loop + ret void +} + +define void @test_store_loaded_value(ptr noalias %src, ptr noalias %dst, i32 %n) #0 { +; I64-LABEL: define void @test_store_loaded_value( +; I64-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[N:%.*]]) #[[ATTR0]] { +; I64-NEXT: [[BB:.*:]] +; I64-NEXT: [[PRE:%.*]] = icmp slt i32 [[N]], 1 +; I64-NEXT: br i1 [[PRE]], [[EXIT:label %.*]], label %[[PH:.*]] +; I64: [[PH]]: +; I64-NEXT: [[N_EXT:%.*]] = zext i32 [[N]] to i64 +; I64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_EXT]], 4 +; I64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; I64: [[VECTOR_PH]]: +; I64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_EXT]], 4 +; I64-NEXT: [[N_VEC:%.*]] = sub i64 [[N_EXT]], [[N_MOD_VF]] +; I64-NEXT: br label %[[VECTOR_BODY:.*]] +; I64: [[VECTOR_BODY]]: +; I64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ 
[[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; I64-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; I64-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; I64-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; I64-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3 +; I64-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP0]] +; I64-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP1]] +; I64-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP2]] +; I64-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP3]] +; I64-NEXT: [[TMP8:%.*]] = load double, ptr [[TMP4]], align 8 +; I64-NEXT: [[TMP9:%.*]] = load double, ptr [[TMP5]], align 8 +; I64-NEXT: [[TMP10:%.*]] = load double, ptr [[TMP6]], align 8 +; I64-NEXT: [[TMP11:%.*]] = load double, ptr [[TMP7]], align 8 +; I64-NEXT: [[TMP12:%.*]] = shl i64 [[TMP0]], 1 +; I64-NEXT: [[TMP13:%.*]] = shl i64 [[TMP1]], 1 +; I64-NEXT: [[TMP14:%.*]] = shl i64 [[TMP2]], 1 +; I64-NEXT: [[TMP15:%.*]] = shl i64 [[TMP3]], 1 +; I64-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP12]] +; I64-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP13]] +; I64-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP14]] +; I64-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP15]] +; I64-NEXT: store double [[TMP8]], ptr [[TMP16]], align 8 +; I64-NEXT: store double [[TMP9]], ptr [[TMP17]], align 8 +; I64-NEXT: store double [[TMP10]], ptr [[TMP18]], align 8 +; I64-NEXT: store double [[TMP11]], ptr [[TMP19]], align 8 +; I64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; I64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; I64-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; I64: [[MIDDLE_BLOCK]]: +; I64-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_EXT]], [[N_VEC]] +; I64-NEXT: br i1 [[CMP_N]], [[EXIT_LOOPEXIT:label %.*]], label %[[SCALAR_PH]] +; I64: [[SCALAR_PH]]: +; +; I32-LABEL: define void @test_store_loaded_value( +; I32-SAME: 
ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[N:%.*]]) #[[ATTR0]] { +; I32-NEXT: [[BB:.*:]] +; I32-NEXT: [[PRE:%.*]] = icmp slt i32 [[N]], 1 +; I32-NEXT: br i1 [[PRE]], [[EXIT:label %.*]], label %[[PH:.*]] +; I32: [[PH]]: +; I32-NEXT: [[N_EXT:%.*]] = zext i32 [[N]] to i64 +; I32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_EXT]], 4 +; I32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; I32: [[VECTOR_PH]]: +; I32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_EXT]], 4 +; I32-NEXT: [[N_VEC:%.*]] = sub i64 [[N_EXT]], [[N_MOD_VF]] +; I32-NEXT: br label %[[VECTOR_BODY:.*]] +; I32: [[VECTOR_BODY]]: +; I32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; I32-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; I32-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; I32-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; I32-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3 +; I32-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP0]] +; I32-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP1]] +; I32-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP2]] +; I32-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP3]] +; I32-NEXT: [[TMP8:%.*]] = load double, ptr [[TMP4]], align 8 +; I32-NEXT: [[TMP9:%.*]] = load double, ptr [[TMP5]], align 8 +; I32-NEXT: [[TMP10:%.*]] = load double, ptr [[TMP6]], align 8 +; I32-NEXT: [[TMP11:%.*]] = load double, ptr [[TMP7]], align 8 +; I32-NEXT: [[TMP12:%.*]] = shl i64 [[TMP0]], 1 +; I32-NEXT: [[TMP13:%.*]] = shl i64 [[TMP1]], 1 +; I32-NEXT: [[TMP14:%.*]] = shl i64 [[TMP2]], 1 +; I32-NEXT: [[TMP15:%.*]] = shl i64 [[TMP3]], 1 +; I32-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP12]] +; I32-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP13]] +; I32-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP14]] +; I32-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP15]] +; I32-NEXT: store double 
[[TMP8]], ptr [[TMP16]], align 8 +; I32-NEXT: store double [[TMP9]], ptr [[TMP17]], align 8 +; I32-NEXT: store double [[TMP10]], ptr [[TMP18]], align 8 +; I32-NEXT: store double [[TMP11]], ptr [[TMP19]], align 8 +; I32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; I32-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; I32-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; I32: [[MIDDLE_BLOCK]]: +; I32-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_EXT]], [[N_VEC]] +; I32-NEXT: br i1 [[CMP_N]], [[EXIT_LOOPEXIT:label %.*]], label %[[SCALAR_PH]] +; I32: [[SCALAR_PH]]: +; +bb: + %pre = icmp slt i32 %n, 1 + br i1 %pre, label %exit, label %ph + +ph: + %n.ext = zext i32 %n to i64 + br label %loop + +loop: + %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ] + %iv.next = add i64 %iv, 1 + %gep.src = getelementptr i8, ptr %src, i64 %iv + %l = load double, ptr %gep.src, align 8 + %sext = shl i64 %iv, 1 + %gep.dst = getelementptr i8, ptr %dst, i64 %sext + store double %l, ptr %gep.dst, align 8 + %ec = icmp eq i64 %iv.next, %n.ext + br i1 %ec, label %exit, label %loop, !llvm.loop !0 + +exit: + ret void +} + +attributes #0 = { "target-cpu"="znver2" } + +!0 = distinct !{!0, !1} +!1 = !{!"llvm.loop.vectorize.enable", i1 true} diff --git a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll index b713a39..272b62b 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll @@ -33,8 +33,6 @@ define void @value_defined_in_loop1_used_for_trip_counts(i32 %start, i1 %c, ptr ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT_1_LOOPEXIT1:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_3:.*]] ; CHECK: [[LOOP_2_PREHEADER]]: ; CHECK-NEXT: br label %[[LOOP_2:.*]] ; CHECK: [[LOOP_2]]: @@ -48,13 
+46,6 @@ define void @value_defined_in_loop1_used_for_trip_counts(i32 %start, i1 %c, ptr ; CHECK-NEXT: store i16 0, ptr [[GEP_DST]], align 2 ; CHECK-NEXT: [[EC_2:%.*]] = icmp ult i64 [[IV_2]], [[IV_1_LCSSA]] ; CHECK-NEXT: br i1 [[EC_2]], label %[[LOOP_2]], label %[[EXIT_1_LOOPEXIT:.*]] -; CHECK: [[LOOP_3]]: -; CHECK-NEXT: [[IV_4:%.*]] = phi i64 [ [[IV_4_NEXT:%.*]], %[[LOOP_3]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP_DST_2:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_4]] -; CHECK-NEXT: store i8 0, ptr [[GEP_DST_2]], align 1 -; CHECK-NEXT: [[IV_4_NEXT]] = add i64 [[IV_4]], 1 -; CHECK-NEXT: [[EC_3:%.*]] = icmp ult i64 [[IV_4_NEXT]], [[IV_1_LCSSA]] -; CHECK-NEXT: br i1 [[EC_3]], label %[[LOOP_3]], label %[[EXIT_1_LOOPEXIT1]] ; CHECK: [[EXIT_1_LOOPEXIT]]: ; CHECK-NEXT: br label %[[EXIT_1:.*]] ; CHECK: [[EXIT_1_LOOPEXIT1]]: diff --git a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll index f877e1b..e99ffda 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll @@ -39,12 +39,8 @@ define void @example1() optsize { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[TMP7:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[TMP6:%.*]] ; CHECK: 6: -; CHECK-NEXT: br i1 poison, label [[TMP7]], label [[TMP6]] -; CHECK: 7: ; CHECK-NEXT: ret void ; br label %1 @@ -123,8 +119,6 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[DOT_PREHEADER_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[DOTLR_PH5:%.*]] ; CHECK: ..preheader_crit_edge: ; CHECK-NEXT: [[PHITMP:%.*]] = zext nneg i32 [[N]] to i64 ; CHECK-NEXT: br 
label [[DOTPREHEADER]] @@ -134,7 +128,7 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: br i1 [[TMP16]], label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH_PREHEADER:%.*]] ; CHECK: .lr.ph.preheader: ; CHECK-NEXT: br label [[VECTOR_PH8:%.*]] -; CHECK: vector.ph8: +; CHECK: vector.ph7: ; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[N]] to i64 ; CHECK-NEXT: [[N_RND_UP10:%.*]] = add nuw nsw i64 [[TMP17]], 3 ; CHECK-NEXT: [[N_VEC12:%.*]] = and i64 [[N_RND_UP10]], 8589934588 @@ -142,7 +136,7 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: [[BROADCAST_SPLATINSERT19:%.*]] = insertelement <4 x i64> poison, i64 [[TRIP_COUNT_MINUS_114]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT20:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT19]], <4 x i64> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY13:%.*]] -; CHECK: vector.body15: +; CHECK: vector.body14: ; CHECK-NEXT: [[INDEX16:%.*]] = phi i64 [ 0, [[VECTOR_PH8]] ], [ [[INDEX_NEXT29:%.*]], [[PRED_STORE_CONTINUE26:%.*]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[I_0_LCSSA]], [[INDEX16]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT17:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX16]], i64 0 @@ -151,7 +145,7 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: [[TMP18:%.*]] = icmp ule <4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT20]] ; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP18]], i64 0 ; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF19:%.*]], label [[PRED_STORE_CONTINUE20:%.*]] -; CHECK: pred.store.if19: +; CHECK: pred.store.if18: ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4 ; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr @c, i64 [[OFFSET_IDX]] @@ -160,10 +154,10 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: [[TMP25:%.*]] = and i32 [[TMP23]], [[TMP21]] ; CHECK-NEXT: store i32 [[TMP25]], ptr [[TMP24]], align 4 ; CHECK-NEXT: 
br label [[PRED_STORE_CONTINUE20]] -; CHECK: pred.store.continue20: +; CHECK: pred.store.continue19: ; CHECK-NEXT: [[TMP26:%.*]] = extractelement <4 x i1> [[TMP18]], i64 1 ; CHECK-NEXT: br i1 [[TMP26]], label [[PRED_STORE_IF21:%.*]], label [[PRED_STORE_CONTINUE22:%.*]] -; CHECK: pred.store.if21: +; CHECK: pred.store.if20: ; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[OFFSET_IDX]], 1 ; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[TMP27]] ; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4 @@ -173,10 +167,10 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: [[TMP33:%.*]] = and i32 [[TMP31]], [[TMP29]] ; CHECK-NEXT: store i32 [[TMP33]], ptr [[TMP32]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE22]] -; CHECK: pred.store.continue22: +; CHECK: pred.store.continue21: ; CHECK-NEXT: [[TMP34:%.*]] = extractelement <4 x i1> [[TMP18]], i64 2 ; CHECK-NEXT: br i1 [[TMP34]], label [[PRED_STORE_IF23:%.*]], label [[PRED_STORE_CONTINUE24:%.*]] -; CHECK: pred.store.if23: +; CHECK: pred.store.if22: ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[OFFSET_IDX]], 2 ; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[TMP35]] ; CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP36]], align 4 @@ -186,10 +180,10 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: [[TMP41:%.*]] = and i32 [[TMP39]], [[TMP37]] ; CHECK-NEXT: store i32 [[TMP41]], ptr [[TMP40]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE24]] -; CHECK: pred.store.continue24: +; CHECK: pred.store.continue23: ; CHECK-NEXT: [[TMP42:%.*]] = extractelement <4 x i1> [[TMP18]], i64 3 ; CHECK-NEXT: br i1 [[TMP42]], label [[PRED_STORE_IF25:%.*]], label [[PRED_STORE_CONTINUE26]] -; CHECK: pred.store.if25: +; CHECK: pred.store.if24: ; CHECK-NEXT: [[TMP43:%.*]] = add i64 [[OFFSET_IDX]], 3 ; CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[TMP43]] ; CHECK-NEXT: [[TMP45:%.*]] = load i32, ptr [[TMP44]], align 4 @@ -199,18 +193,12 @@ define 
void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: [[TMP49:%.*]] = and i32 [[TMP47]], [[TMP45]] ; CHECK-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE26]] -; CHECK: pred.store.continue26: +; CHECK: pred.store.continue25: ; CHECK-NEXT: [[INDEX_NEXT29]] = add nuw i64 [[INDEX16]], 4 ; CHECK-NEXT: [[TMP50:%.*]] = icmp eq i64 [[INDEX_NEXT29]], [[N_VEC12]] -; CHECK-NEXT: br i1 [[TMP50]], label [[MIDDLE_BLOCK28:%.*]], label [[VECTOR_BODY13]], !llvm.loop [[LOOP4:![0-9]+]] -; CHECK: middle.block28: -; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: scalar.ph7: +; CHECK-NEXT: br i1 [[TMP50]], label [[MIDDLE_BLOCK27:%.*]], label [[VECTOR_BODY13]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: middle.block27: ; CHECK-NEXT: br label [[DOTLR_PH1:%.*]] -; CHECK: .lr.ph5: -; CHECK-NEXT: br i1 poison, label [[DOT_PREHEADER_CRIT_EDGE]], label [[DOTLR_PH5]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOTLR_PH]], label [[DOTLR_PH1]] ; CHECK: ._crit_edge.loopexit: ; CHECK-NEXT: br label [[DOT_CRIT_EDGE]] ; CHECK: ._crit_edge: @@ -328,11 +316,7 @@ define void @example3(i32 %n, ptr noalias nocapture %p, ptr noalias nocapture %q ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]] ; CHECK: ._crit_edge.loopexit: ; CHECK-NEXT: br label [[DOT_CRIT_EDGE]] ; CHECK: ._crit_edge: @@ -418,12 +402,8 @@ define void @example23b(ptr noalias nocapture %src, ptr noalias nocapture %dst) ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; 
CHECK-NEXT: br label [[TMP5:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[TMP4:%.*]] ; CHECK: 4: -; CHECK-NEXT: br i1 poison, label [[TMP5]], label [[TMP4]] -; CHECK: 5: ; CHECK-NEXT: ret void ; br label %1 @@ -516,12 +496,8 @@ define void @example23c(ptr noalias nocapture %src, ptr noalias nocapture %dst) ; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[TMP26:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[TMP25:%.*]] ; CHECK: 25: -; CHECK-NEXT: br i1 poison, label [[TMP26]], label [[TMP25]] -; CHECK: 26: ; CHECK-NEXT: ret void ; br label %1 diff --git a/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll b/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll index 931c927..15e2678 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll @@ -15,17 +15,17 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; CHECK-NEXT: [[ITER_CHECK:.*]]: ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64 ; CHECK-NEXT: [[IDXPROM5:%.*]] = sext i32 [[J]] to i64 -; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: +; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH1:.*]] -; CHECK: [[VECTOR_PH1]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH1]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP144:%.*]], 
%[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP145:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP146:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP147:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP144:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP145:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP146:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP147:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 @@ -184,15 +184,15 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; CHECK-NEXT: [[TMP149:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX8]]) ; CHECK-NEXT: br i1 false, label %[[FOR_COND_CLEANUP:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: br i1 false, label %[[SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF8:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: -; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ] +; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, 
%[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[TMP171:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[BC_MERGE_RDX]], i32 0 -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDEX9:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI10:%.*]] = phi <4 x i32> [ [[TMP171]], %[[VEC_EPILOG_PH]] ], [ [[TMP168:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; CHECK: [[VEC_EPILOG_VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX9:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI10:%.*]] = phi <4 x i32> [ [[TMP171]], %[[VEC_EPILOG_PH]] ], [ [[TMP168:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP172:%.*]] = add i64 [[INDEX9]], 0 ; CHECK-NEXT: [[TMP173:%.*]] = add i64 [[INDEX9]], 1 ; CHECK-NEXT: [[TMP174:%.*]] = add i64 [[INDEX9]], 2 @@ -216,20 +216,20 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; CHECK-NEXT: [[TMP168]] = add <4 x i32> [[TMP167]], [[TMP166]] ; CHECK-NEXT: [[INDEX_NEXT12]] = add nuw i64 [[INDEX9]], 4 ; CHECK-NEXT: [[TMP169:%.*]] = icmp eq i64 [[INDEX_NEXT12]], 100 -; CHECK-NEXT: br i1 [[TMP169]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP169]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP170:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP168]]) -; CHECK-NEXT: br i1 true, label %[[FOR_COND_CLEANUP]], label %[[SCALAR_PH]] -; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: br i1 true, label %[[FOR_COND_CLEANUP]], label %[[VEC_EPILOG_SCALAR_PH]] +; CHECK: 
[[VEC_EPILOG_SCALAR_PH]]: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 100, %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] ; CHECK-NEXT: [[BC_MERGE_RDX13:%.*]] = phi i32 [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] -; CHECK-NEXT: br label %[[FOR_BODY1:.*]] +; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_COND_CLEANUP]]: -; CHECK-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], %[[FOR_BODY1]] ], [ [[TMP149]], %[[MIDDLE_BLOCK]] ], [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], %[[FOR_BODY]] ], [ [[TMP149]], %[[MIDDLE_BLOCK]] ], [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[ADD7_LCSSA]] -; CHECK: [[FOR_BODY1]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY1]] ] -; CHECK-NEXT: [[SUM_015:%.*]] = phi i32 [ [[BC_MERGE_RDX13]], %[[SCALAR_PH]] ], [ [[ADD7]], %[[FOR_BODY1]] ] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[SUM_015:%.*]] = phi i32 [ [[BC_MERGE_RDX13]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[ADD7]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[INDVARS_IV]] ; CHECK-NEXT: [[TMP150:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4, !tbaa [[INT_TBAA1]] ; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[INDVARS_IV]], i64 [[IDXPROM5]] @@ -239,24 +239,24 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; CHECK-NEXT: [[ADD7]] = add i32 [[ADD]], [[MUL]] ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EXITCOND]], 
label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY1]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; ; MAX-BW-LABEL: define i32 @matrix_row_col( ; MAX-BW-SAME: ptr readonly captures(none) [[DATA:%.*]], i32 [[I:%.*]], i32 [[J:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] { ; MAX-BW-NEXT: [[ITER_CHECK:.*]]: ; MAX-BW-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64 ; MAX-BW-NEXT: [[IDXPROM5:%.*]] = sext i32 [[J]] to i64 -; MAX-BW-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; MAX-BW-NEXT: br i1 false, label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; MAX-BW: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: +; MAX-BW-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] ; MAX-BW: [[VECTOR_PH]]: -; MAX-BW-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH1:.*]] -; MAX-BW: [[VECTOR_PH1]]: ; MAX-BW-NEXT: br label %[[VECTOR_BODY:.*]] ; MAX-BW: [[VECTOR_BODY]]: -; MAX-BW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH1]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; MAX-BW-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP144:%.*]], %[[VECTOR_BODY]] ] -; MAX-BW-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP145:%.*]], %[[VECTOR_BODY]] ] -; MAX-BW-NEXT: [[VEC_PHI2:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP146:%.*]], %[[VECTOR_BODY]] ] -; MAX-BW-NEXT: [[VEC_PHI3:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP147:%.*]], %[[VECTOR_BODY]] ] +; MAX-BW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; MAX-BW-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP144:%.*]], %[[VECTOR_BODY]] ] +; MAX-BW-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP145:%.*]], %[[VECTOR_BODY]] ] +; MAX-BW-NEXT: 
[[VEC_PHI2:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP146:%.*]], %[[VECTOR_BODY]] ] +; MAX-BW-NEXT: [[VEC_PHI3:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP147:%.*]], %[[VECTOR_BODY]] ] ; MAX-BW-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 ; MAX-BW-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 ; MAX-BW-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 @@ -415,15 +415,15 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; MAX-BW-NEXT: [[TMP149:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX8]]) ; MAX-BW-NEXT: br i1 false, label %[[FOR_COND_CLEANUP:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; MAX-BW: [[VEC_EPILOG_ITER_CHECK]]: -; MAX-BW-NEXT: br i1 false, label %[[SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; MAX-BW-NEXT: br i1 false, label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF8:![0-9]+]] ; MAX-BW: [[VEC_EPILOG_PH]]: -; MAX-BW-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ] -; MAX-BW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ] +; MAX-BW-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; MAX-BW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; MAX-BW-NEXT: [[TMP171:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[BC_MERGE_RDX]], i32 0 -; MAX-BW-NEXT: br label %[[FOR_BODY:.*]] -; MAX-BW: [[FOR_BODY]]: -; MAX-BW-NEXT: [[INDEX9:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], %[[FOR_BODY]] ] -; MAX-BW-NEXT: [[VEC_PHI10:%.*]] = phi <4 x i32> [ [[TMP171]], %[[VEC_EPILOG_PH]] ], [ [[TMP168:%.*]], %[[FOR_BODY]] ] +; MAX-BW-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; MAX-BW: [[VEC_EPILOG_VECTOR_BODY]]: +; MAX-BW-NEXT: [[INDEX9:%.*]] = phi i64 [ 
[[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; MAX-BW-NEXT: [[VEC_PHI10:%.*]] = phi <4 x i32> [ [[TMP171]], %[[VEC_EPILOG_PH]] ], [ [[TMP168:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] ; MAX-BW-NEXT: [[TMP172:%.*]] = add i64 [[INDEX9]], 0 ; MAX-BW-NEXT: [[TMP173:%.*]] = add i64 [[INDEX9]], 1 ; MAX-BW-NEXT: [[TMP174:%.*]] = add i64 [[INDEX9]], 2 @@ -447,20 +447,20 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; MAX-BW-NEXT: [[TMP168]] = add <4 x i32> [[TMP167]], [[TMP166]] ; MAX-BW-NEXT: [[INDEX_NEXT12]] = add nuw i64 [[INDEX9]], 4 ; MAX-BW-NEXT: [[TMP169:%.*]] = icmp eq i64 [[INDEX_NEXT12]], 100 -; MAX-BW-NEXT: br i1 [[TMP169]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; MAX-BW-NEXT: br i1 [[TMP169]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; MAX-BW: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; MAX-BW-NEXT: [[TMP170:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP168]]) -; MAX-BW-NEXT: br i1 true, label %[[FOR_COND_CLEANUP]], label %[[SCALAR_PH]] -; MAX-BW: [[SCALAR_PH]]: +; MAX-BW-NEXT: br i1 true, label %[[FOR_COND_CLEANUP]], label %[[VEC_EPILOG_SCALAR_PH]] +; MAX-BW: [[VEC_EPILOG_SCALAR_PH]]: ; MAX-BW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 100, %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] ; MAX-BW-NEXT: [[BC_MERGE_RDX13:%.*]] = phi i32 [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] -; MAX-BW-NEXT: br label %[[FOR_BODY1:.*]] +; MAX-BW-NEXT: br label %[[FOR_BODY:.*]] ; MAX-BW: [[FOR_COND_CLEANUP]]: -; MAX-BW-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], %[[FOR_BODY1]] ], [ [[TMP149]], %[[MIDDLE_BLOCK]] ], [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] +; MAX-BW-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], %[[FOR_BODY]] ], [ [[TMP149]], 
%[[MIDDLE_BLOCK]] ], [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] ; MAX-BW-NEXT: ret i32 [[ADD7_LCSSA]] -; MAX-BW: [[FOR_BODY1]]: -; MAX-BW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY1]] ] -; MAX-BW-NEXT: [[SUM_015:%.*]] = phi i32 [ [[BC_MERGE_RDX13]], %[[SCALAR_PH]] ], [ [[ADD7]], %[[FOR_BODY1]] ] +; MAX-BW: [[FOR_BODY]]: +; MAX-BW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] +; MAX-BW-NEXT: [[SUM_015:%.*]] = phi i32 [ [[BC_MERGE_RDX13]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[ADD7]], %[[FOR_BODY]] ] ; MAX-BW-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[INDVARS_IV]] ; MAX-BW-NEXT: [[TMP150:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4, !tbaa [[INT_TBAA1]] ; MAX-BW-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[INDVARS_IV]], i64 [[IDXPROM5]] @@ -470,7 +470,7 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; MAX-BW-NEXT: [[ADD7]] = add i32 [[ADD]], [[MUL]] ; MAX-BW-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; MAX-BW-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100 -; MAX-BW-NEXT: br i1 [[EXITCOND]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY1]], !llvm.loop [[LOOP9:![0-9]+]] +; MAX-BW-NEXT: br i1 [[EXITCOND]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; entry: %idxprom = sext i32 %i to i64 @@ -555,26 +555,9 @@ define void @test(ptr %A, ptr noalias %B) #0 { ; CHECK-NEXT: store i8 [[TMP35]], ptr [[TMP27]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512 -; CHECK-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], 
!llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[IV_0:%.*]] = add nuw nsw i64 [[IV]], 0 -; CHECK-NEXT: [[IV_1:%.*]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[IN0:%.*]] = getelementptr inbounds [1024 x i32], ptr [[A]], i64 0, i64 [[IV_0]] -; CHECK-NEXT: [[IN1:%.*]] = getelementptr inbounds [1024 x i32], ptr [[A]], i64 0, i64 [[IV_1]] -; CHECK-NEXT: [[V0:%.*]] = load i32, ptr [[IN0]], align 4 -; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[IN1]], align 4 -; CHECK-NEXT: [[REDUCE_ADD_0:%.*]] = add i32 [[V0]], [[V1]] -; CHECK-NEXT: [[REDUCE_ADD_0_NARROW:%.*]] = trunc i32 [[REDUCE_ADD_0]] to i8 -; CHECK-NEXT: [[OUT:%.*]] = getelementptr inbounds [1024 x i8], ptr [[B]], i64 0, i64 [[IV_0]] -; CHECK-NEXT: store i8 [[REDUCE_ADD_0_NARROW]], ptr [[OUT]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV_0]], 2 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]] ; CHECK: [[FOR_COND_CLEANUP]]: ; CHECK-NEXT: ret void ; @@ -675,26 +658,9 @@ define void @test(ptr %A, ptr noalias %B) #0 { ; MAX-BW-NEXT: store i8 [[TMP67]], ptr [[TMP51]], align 1 ; MAX-BW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; MAX-BW-NEXT: [[TMP68:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512 -; MAX-BW-NEXT: br i1 [[TMP68]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; MAX-BW-NEXT: br i1 [[TMP68]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; MAX-BW: [[MIDDLE_BLOCK]]: ; MAX-BW-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MAX-BW: [[SCALAR_PH:.*]]: -; MAX-BW-NEXT: br label %[[FOR_BODY:.*]] -; MAX-BW: [[FOR_BODY]]: -; MAX-BW-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ 
[[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MAX-BW-NEXT: [[IV_0:%.*]] = add nuw nsw i64 [[IV]], 0 -; MAX-BW-NEXT: [[IV_1:%.*]] = add nuw nsw i64 [[IV]], 1 -; MAX-BW-NEXT: [[IN0:%.*]] = getelementptr inbounds [1024 x i32], ptr [[A]], i64 0, i64 [[IV_0]] -; MAX-BW-NEXT: [[IN1:%.*]] = getelementptr inbounds [1024 x i32], ptr [[A]], i64 0, i64 [[IV_1]] -; MAX-BW-NEXT: [[V0:%.*]] = load i32, ptr [[IN0]], align 4 -; MAX-BW-NEXT: [[V1:%.*]] = load i32, ptr [[IN1]], align 4 -; MAX-BW-NEXT: [[REDUCE_ADD_0:%.*]] = add i32 [[V0]], [[V1]] -; MAX-BW-NEXT: [[REDUCE_ADD_0_NARROW:%.*]] = trunc i32 [[REDUCE_ADD_0]] to i8 -; MAX-BW-NEXT: [[OUT:%.*]] = getelementptr inbounds [1024 x i8], ptr [[B]], i64 0, i64 [[IV_0]] -; MAX-BW-NEXT: store i8 [[REDUCE_ADD_0_NARROW]], ptr [[OUT]], align 1 -; MAX-BW-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV_0]], 2 -; MAX-BW-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 1024 -; MAX-BW-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]] ; MAX-BW: [[FOR_COND_CLEANUP]]: ; MAX-BW-NEXT: ret void ; @@ -745,9 +711,10 @@ attributes #0 = { "target-cpu"="core-avx2" "target-features"="+avx,+avx2,+sse,+s ; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]} ; CHECK: [[META6]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]], [[META7]]} -; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META7]], [[META6]]} -; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META6]], [[META7]]} +; CHECK: [[PROF8]] = !{!"branch_weights", i32 4, i32 28} +; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META6]], [[META7]]} +; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META7]], [[META6]]} +; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META6]], [[META7]]} ;. 
; MAX-BW: [[INT_TBAA1]] = !{[[META2:![0-9]+]], [[META2]], i64 0} ; MAX-BW: [[META2]] = !{!"int", [[META3:![0-9]+]], i64 0} @@ -756,7 +723,8 @@ attributes #0 = { "target-cpu"="core-avx2" "target-features"="+avx,+avx2,+sse,+s ; MAX-BW: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]} ; MAX-BW: [[META6]] = !{!"llvm.loop.isvectorized", i32 1} ; MAX-BW: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"} -; MAX-BW: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]], [[META7]]} -; MAX-BW: [[LOOP9]] = distinct !{[[LOOP9]], [[META7]], [[META6]]} -; MAX-BW: [[LOOP10]] = distinct !{[[LOOP10]], [[META6]], [[META7]]} +; MAX-BW: [[PROF8]] = !{!"branch_weights", i32 4, i32 28} +; MAX-BW: [[LOOP9]] = distinct !{[[LOOP9]], [[META6]], [[META7]]} +; MAX-BW: [[LOOP10]] = distinct !{[[LOOP10]], [[META7]], [[META6]]} +; MAX-BW: [[LOOP11]] = distinct !{[[LOOP11]], [[META6]], [[META7]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll index 669e925..7069534 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll @@ -28,23 +28,9 @@ define dso_local void @tail_folding_enabled(ptr noalias nocapture %A, ptr noalia ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 432 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, 
ptr [[C]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]] -; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 430 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; entry: br label %for.body @@ -89,25 +75,11 @@ define dso_local void @tail_folding_disabled(ptr noalias nocapture %A, ptr noali ; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP6]], ptr [[TMP7]], i32 4, <8 x i1> [[TMP1]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 432 -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]] -; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 430 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; entry: br label %for.body @@ -170,28 +142,12 @@ define i32 @reduction_i32(ptr nocapture readonly %A, ptr nocapture readonly %B, ; CHECK-NEXT: [[TMP11:%.*]] = select <8 x i1> [[TMP4]], <8 x i32> [[TMP10]], <8 x i32> [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP11]]) -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[SUM_0:%.*]] = phi i32 [ [[SUM_1:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDXA]], align 4 -; CHECK-NEXT: [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDXB]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP14]] -; CHECK-NEXT: [[SUM_1]] = add nuw nsw i32 [[ADD]], [[SUM_0]] -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], 
label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_1]], [[FOR_BODY]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_1_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP13]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll b/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll index 27150cb..63f9a13 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll @@ -398,27 +398,9 @@ define i32 @test_count_bits(ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX13:%.*]] = add <4 x i32> [[TMP38]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX14:%.*]] = add <4 x i32> [[TMP39]], [[BIN_RDX13]] ; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX14]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[BYTE:%.*]] = udiv i64 [[IV]], 8 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[BYTE]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i8, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: [[BIT:%.*]] = urem i64 [[IV]], 8 -; CHECK-NEXT: [[BIT_TRUNC:%.*]] = trunc i64 [[BIT]] to i8 -; CHECK-NEXT: [[MASK:%.*]] = lshr i8 [[EARLYCND]], [[BIT_TRUNC]] -; CHECK-NEXT: [[TEST:%.*]] = and i8 [[MASK]], 1 -; CHECK-NEXT: [[VAL:%.*]] = zext i8 [[TEST]] to i32 -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LOOP]] ], [ 
[[TMP41]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP41]] ; entry: %alloca = alloca [4096 x i32] diff --git a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll index 3ae8001..28de5c7 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll @@ -141,20 +141,7 @@ define void @vectorized1(ptr noalias nocapture %A, ptr noalias nocapture readonl ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]] -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP7]] -; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP8]], [[TMP9]] -; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP7]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 20 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -200,22 +187,9 @@ define void @vectorized2(ptr noalias nocapture %A, ptr noalias nocapture readonl ; CHECK-NEXT: store <8 x float> [[TMP5]], ptr [[TMP3]], align 
4, !llvm.access.group [[ACC_GRP7]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]] -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP7]] -; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP7]], [[TMP8]] -; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP7]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 16 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll index 282e9a5..1e94f83 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll @@ -38,21 +38,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; IF-EVL-NEXT: br i1 
[[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]] -; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; IF-EVL: for.cond.cleanup: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll index 69cdd65..455fe83 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll @@ -74,22 +74,7 @@ define void @test_pr59090(ptr %l_out, ptr noalias %b) #0 { ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10008 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP1:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: 
[[IV_MUL:%.*]] = mul nuw i64 [[IV]], 6 -; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[B]], align 1, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: store i8 [[L]], ptr [[B]], align 1, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[ARRAYIDX77:%.*]] = getelementptr i8, ptr [[L_OUT]], i64 [[IV_MUL]] -; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX77]], align 1, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[ADD_2:%.*]] = add i64 [[IV_MUL]], 2 -; CHECK-NEXT: [[ARRAYIDX97:%.*]] = getelementptr i8, ptr [[L_OUT]], i64 [[ADD_2]] -; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX97]], align 1, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 10000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll b/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll index bdedcca..9ea9e11 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll @@ -48,25 +48,7 @@ define void @iv.4_used_as_vector_and_first_lane(ptr %src, ptr noalias %dst) { ; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 ; CHECK-NEXT: br i1 [[TMP36]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[G_SRC:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[G_SRC]], align 8 -; CHECK-NEXT: [[IV_4:%.*]] = add nuw nsw i64 [[IV]], 4 -; CHECK-NEXT: 
[[C:%.*]] = icmp ule i64 [[L]], 128 -; CHECK-NEXT: br i1 [[C]], label [[LOOP_THEN:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.then: -; CHECK-NEXT: [[OR:%.*]] = or disjoint i64 [[IV_4]], 1 -; CHECK-NEXT: [[G_DST:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[OR]] -; CHECK-NEXT: store i64 [[IV_4]], ptr [[G_DST]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 32 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -131,25 +113,7 @@ define void @iv.4_used_as_first_lane(ptr %src, ptr noalias %dst) { ; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 ; CHECK-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[G_SRC:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[G_SRC]], align 8 -; CHECK-NEXT: [[IV_4:%.*]] = add nuw nsw i64 [[IV]], 4 -; CHECK-NEXT: [[C:%.*]] = icmp ule i64 [[L]], 128 -; CHECK-NEXT: br i1 [[C]], label [[LOOP_THEN:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.then: -; CHECK-NEXT: [[OR:%.*]] = or disjoint i64 [[IV_4]], 1 -; CHECK-NEXT: [[G_DST:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[OR]] -; CHECK-NEXT: store i64 [[L]], ptr [[G_DST]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 32 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label 
[[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll b/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll index f9403b8..774f0db 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll @@ -134,30 +134,9 @@ define i32 @predicated_sdiv_masked_load(ptr %a, ptr %b, i32 %x, i1 %c) { ; SINK-GATHER-NEXT: br i1 [[TMP48]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; SINK-GATHER: middle.block: ; SINK-GATHER-NEXT: [[TMP49:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP47]]) -; SINK-GATHER-NEXT: br label [[FOR_END:%.*]] -; SINK-GATHER: scalar.ph: -; SINK-GATHER-NEXT: br label [[FOR_BODY:%.*]] -; SINK-GATHER: for.body: -; SINK-GATHER-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_INC:%.*]] ] -; SINK-GATHER-NEXT: [[R:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[T7:%.*]], [[FOR_INC]] ] -; SINK-GATHER-NEXT: [[T0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]] -; SINK-GATHER-NEXT: [[T1:%.*]] = load i32, ptr [[T0]], align 4 -; SINK-GATHER-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; SINK-GATHER: if.then: -; SINK-GATHER-NEXT: [[T2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]] -; SINK-GATHER-NEXT: [[T3:%.*]] = load i32, ptr [[T2]], align 4 -; SINK-GATHER-NEXT: [[T4:%.*]] = sdiv i32 [[T3]], [[X]] -; SINK-GATHER-NEXT: [[T5:%.*]] = add nsw i32 [[T4]], [[T1]] -; SINK-GATHER-NEXT: br label [[FOR_INC]] -; SINK-GATHER: for.inc: -; SINK-GATHER-NEXT: [[T6:%.*]] = phi i32 [ [[T1]], [[FOR_BODY]] ], [ [[T5]], [[IF_THEN]] ] -; SINK-GATHER-NEXT: [[T7]] = add i32 [[R]], [[T6]] -; SINK-GATHER-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 -; SINK-GATHER-NEXT: [[COND:%.*]] = icmp eq i64 [[I_NEXT]], 10000 -; SINK-GATHER-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]] +; SINK-GATHER-NEXT: br label [[FOR_INC:%.*]] ; 
SINK-GATHER: for.end: -; SINK-GATHER-NEXT: [[T8:%.*]] = phi i32 [ [[T7]], [[FOR_INC]] ], [ [[TMP49]], [[MIDDLE_BLOCK]] ] -; SINK-GATHER-NEXT: ret i32 [[T8]] +; SINK-GATHER-NEXT: ret i32 [[TMP49]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/bsd_regex.ll b/llvm/test/Transforms/LoopVectorize/bsd_regex.ll index afdbfaa..f64255f 100644 --- a/llvm/test/Transforms/LoopVectorize/bsd_regex.ll +++ b/llvm/test/Transforms/LoopVectorize/bsd_regex.ll @@ -37,11 +37,7 @@ define i32 @foo(ptr nocapture %A) { ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000 ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 undef ; diff --git a/llvm/test/Transforms/LoopVectorize/check-prof-info.ll b/llvm/test/Transforms/LoopVectorize/check-prof-info.ll index ce9d1f2..b59ad84 100644 --- a/llvm/test/Transforms/LoopVectorize/check-prof-info.ll +++ b/llvm/test/Transforms/LoopVectorize/check-prof-info.ll @@ -19,12 +19,8 @@ define void @_Z3foov() { ; CHECK: vector.body: ; CHECK: br i1 [[TMP6:%.*]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF1:![0-9]+]], !llvm.loop [[LOOP2:![0-9]+]] ; CHECK: middle.block: -; CHECK: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: -; CHECK: for.body: -; CHECK: br i1 [[EXITCOND:%.*]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !prof [[PROF5:![0-9]+]] ; ; CHECK-MASKED-LABEL: @_Z3foov( ; CHECK-MASKED: entry: @@ -34,12 +30,8 @@ define void @_Z3foov() { ; CHECK-MASKED: vector.body: ; CHECK-MASKED: br i1 [[TMP18:%.*]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF1:![0-9]+]], !llvm.loop [[LOOP2:![0-9]+]] ; CHECK-MASKED: 
middle.block: -; CHECK-MASKED: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK-MASKED: scalar.ph: ; CHECK-MASKED: br label [[FOR_BODY:%.*]] ; CHECK-MASKED: for.cond.cleanup: -; CHECK-MASKED: for.body: -; CHECK-MASKED: br i1 [[EXITCOND:%.*]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !prof [[PROF5:![0-9]+]] ; ; CHECK-SCALABLE-LABEL: @_Z3foov( ; CHECK-SCALABLE: entry: diff --git a/llvm/test/Transforms/LoopVectorize/constantfolder-infer-correct-gepty.ll b/llvm/test/Transforms/LoopVectorize/constantfolder-infer-correct-gepty.ll index bd0655d..143a0af 100644 --- a/llvm/test/Transforms/LoopVectorize/constantfolder-infer-correct-gepty.ll +++ b/llvm/test/Transforms/LoopVectorize/constantfolder-infer-correct-gepty.ll @@ -19,19 +19,6 @@ define void @test(ptr %data) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[OR_IV_1:%.*]] = or disjoint i64 [[IV]], 1 -; CHECK-NEXT: [[GEP_POSTSCALE:%.*]] = getelementptr [64 x float], ptr @postscale, i64 0, i64 [[OR_IV_1]] -; CHECK-NEXT: [[LOAD_POSTSCALE:%.*]] = load float, ptr [[GEP_POSTSCALE]], align 4, !tbaa [[FLOAT_TBAA0]] -; CHECK-NEXT: [[LRINT:%.*]] = tail call i64 @llvm.lrint.i64.f32(float [[LOAD_POSTSCALE]]) -; CHECK-NEXT: [[LRINT_TRUNC:%.*]] = trunc i64 [[LRINT]] to i16 -; CHECK-NEXT: store i16 [[LRINT_TRUNC]], ptr [[DATA]], align 2, !tbaa [[SHORT_TBAA4]] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV_NEXT]], 8 -; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/constantfolder.ll b/llvm/test/Transforms/LoopVectorize/constantfolder.ll index 37f2e73..66592b0 100644 --- a/llvm/test/Transforms/LoopVectorize/constantfolder.ll +++ 
b/llvm/test/Transforms/LoopVectorize/constantfolder.ll @@ -16,20 +16,6 @@ define void @const_fold_ptradd(ptr %dst, i64 %d) { ; CHECK-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[CONST_0:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 0, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[DST]], i64 [[CONST_0]] -; CHECK-NEXT: store i16 0, ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -70,20 +56,6 @@ define void @const_fold_inbounds_ptradd(ptr %dst, i64 %d) { ; CHECK-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[CONST_0:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 0, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[CONST_0]] -; CHECK-NEXT: store i16 0, ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult 
i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -125,20 +97,6 @@ define void @const_fold_select(ptr %dst, i64 %d) { ; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[CONST_1:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 1, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[OR:%.*]] = or i64 [[D]], [[CONST_1]] -; CHECK-NEXT: store i64 [[OR]], ptr [[DST]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -179,24 +137,6 @@ define void @const_fold_add_sub_mul_ashr_lshr(ptr %dst, i64 %d) { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[CONST_1:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 1, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[ADD:%.*]] = add i64 2, [[CONST_1]] -; CHECK-NEXT: [[SUB:%.*]] = sub i64 [[ADD]], [[CONST_1]] -; CHECK-NEXT: [[ASHR:%.*]] = ashr i64 
[[SUB]], [[CONST_1]] -; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[ASHR]], 3 -; CHECK-NEXT: [[LSHR:%.*]] = lshr i64 [[MUL]], [[CONST_1]] -; CHECK-NEXT: store i64 [[LSHR]], ptr [[DST]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -241,22 +181,6 @@ define void @const_fold_and_or_xor(ptr %dst, i64 %d) { ; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[CONST_1:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 1, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[OR:%.*]] = or i64 2, [[CONST_1]] -; CHECK-NEXT: [[AND:%.*]] = and i64 [[OR]], [[CONST_1]] -; CHECK-NEXT: [[XOR:%.*]] = and i64 [[AND]], [[CONST_1]] -; CHECK-NEXT: store i64 [[XOR]], ptr [[DST]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -299,21 +223,6 @@ define void @const_fold_cmp_zext(ptr %dst, i64 %d) { ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br 
i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[CONST_1:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 1, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[VAL:%.*]] = icmp ugt i64 2, [[CONST_1]] -; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[VAL]] to i8 -; CHECK-NEXT: store i8 [[ZEXT]], ptr [[DST]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -355,20 +264,6 @@ define void @const_fold_trunc(ptr %dst, i64 %d) { ; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[CONST_0:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 0, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[TRUNC:%.*]] = trunc i64 [[CONST_0]] to i16 -; CHECK-NEXT: store i16 [[TRUNC]], ptr [[DST]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll b/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll index 33e688c..62399c5 100644 --- a/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll +++ b/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll @@ -67,19 +67,7 @@ define void @test(i32 %arg, i32 %L1.limit, 
i32 %L2.switch, i1 %c, ptr %dst) { ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 12 ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[L2_HEADER_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[L2_INNER_HEADER:%.*]] -; CHECK: L2.Inner.header: -; CHECK-NEXT: [[L2_ACCUM:%.*]] = phi i32 [ [[L2_ACCUM_NEXT:%.*]], [[L2_INNER_HEADER]] ], [ 1, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[L2_IV:%.*]] = phi i64 [ [[L2_IV_NEXT:%.*]], [[L2_INNER_HEADER]] ], [ 1, [[SCALAR_PH]] ] -; CHECK-NEXT: [[L2_ACCUM_NEXT]] = sub i32 [[L2_ACCUM]], [[L1_EXIT_VAL]] -; CHECK-NEXT: [[L2_DUMMY_BUT_NEED_IT:%.*]] = sext i32 [[L2_ACCUM_NEXT]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[L2_IV]] -; CHECK-NEXT: store i64 [[L2_DUMMY_BUT_NEED_IT]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[L2_IV_NEXT]] = add nuw nsw i64 [[L2_IV]], 1 -; CHECK-NEXT: [[L2_EXIT_COND:%.*]] = icmp ugt i64 [[L2_IV]], 11 -; CHECK-NEXT: br i1 [[L2_EXIT_COND]], label [[L2_HEADER_LOOPEXIT]], label [[L2_INNER_HEADER]] ; CHECK: L2.exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll index 0a8e9dc..02e1d0e 100644 --- a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll +++ b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll @@ -94,20 +94,8 @@ define void @pr47390(ptr %a) { ; CHECK-NEXT: br i1 [[TMP0]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[PRIMARY:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[PRIMARY_ADD:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[USE_PRIMARY:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[PRIMARY]], %[[LOOP]] ] -; 
CHECK-NEXT: [[SECONDARY:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[SECONDARY_ADD:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[PRIMARY_ADD]] = add i32 [[PRIMARY]], 1 -; CHECK-NEXT: [[SECONDARY_ADD]] = add i32 [[SECONDARY]], 1 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[SECONDARY]] -; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[GEP]], align 8 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[SECONDARY]], 5 -; CHECK-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/debugloc-optimize-vfuf-term.ll b/llvm/test/Transforms/LoopVectorize/debugloc-optimize-vfuf-term.ll index f61478b..b31b732 100644 --- a/llvm/test/Transforms/LoopVectorize/debugloc-optimize-vfuf-term.ll +++ b/llvm/test/Transforms/LoopVectorize/debugloc-optimize-vfuf-term.ll @@ -15,15 +15,6 @@ define i32 @foo(ptr %p) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]], !dbg [[DBG3]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ], !dbg [[DBG7:![0-9]+]] -; CHECK-NEXT: [[CONV:%.*]] = trunc i64 0 to i8, !dbg [[DBG8:![0-9]+]] -; CHECK-NEXT: store i8 [[CONV]], ptr [[P]], align 1, !dbg [[DBG3]] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !dbg [[DBG9:![0-9]+]] -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 1, !dbg [[DBG10:![0-9]+]] -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG11:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret i32 0 ; @@ -64,9 +55,4 @@ exit: ; preds = %loop ; CHECK: [[META4]] = distinct !DISubprogram(name: "foo", scope: [[META1]], file: [[META1]], line: 11, type: [[META5:![0-9]+]], spFlags: DISPFlagDefinition, unit: [[META0]], retainedNodes: [[META6:![0-9]+]]) ; CHECK: [[META5]] = distinct !DISubroutineType(types: [[META6]]) ; CHECK: [[META6]] = !{} -; CHECK: [[DBG7]] = !DILocation(line: 4, 
scope: [[META4]]) -; CHECK: [[DBG8]] = !DILocation(line: 5, scope: [[META4]]) -; CHECK: [[DBG9]] = !DILocation(line: 7, scope: [[META4]]) -; CHECK: [[DBG10]] = !DILocation(line: 8, scope: [[META4]]) -; CHECK: [[DBG11]] = !DILocation(line: 9, scope: [[META4]]) ;. diff --git a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll index d97624f..274bd04 100644 --- a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll +++ b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll @@ -24,16 +24,7 @@ define dso_local void @constTC(ptr noalias nocapture %A) optsize { ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1800 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]] -; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 1800 -; CHECK-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll index 4f5a26e9..156c2bd 100644 --- a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll +++ b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll @@ -198,16 +198,7 @@ define dso_local void @cannotProveAlignedTC(ptr noalias nocapture %A, i32 %p, i3 ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], 
label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ [[RIVPLUS1:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]] -; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], [[N]] -; CHECK-NEXT: br i1 [[COND]], label [[EXIT_LOOPEXIT]], label [[LOOP]] ; CHECK: exit.loopexit: ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll index ff2baec..eca39e6 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll @@ -108,25 +108,8 @@ define i32 @sink_after_dead_inst(ptr %A.ptr) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2 ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[FOR_PREV:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[FOR]], 15 -; CHECK-NEXT: [[C:%.*]] = icmp eq i1 [[CMP]], true -; CHECK-NEXT: [[VEC_DEAD:%.*]] = and i1 [[C]], true -; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; CHECK-NEXT: [[B1:%.*]] = or i16 [[IV_NEXT]], [[IV_NEXT]] -; CHECK-NEXT: [[B3:%.*]] = and i1 [[CMP]], [[C]] -; CHECK-NEXT: [[FOR_PREV]] = zext i16 [[B1]] to i32 -; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[B3]] to i32 -; CHECK-NEXT: 
[[A_GEP:%.*]] = getelementptr i32, ptr [[A_PTR]], i16 [[IV]] -; CHECK-NEXT: store i32 0, ptr [[A_GEP]], align 4 -; CHECK-NEXT: br i1 [[VEC_DEAD]], label %[[FOR_END]], label %[[LOOP]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[FOR_LCSSA]] +; CHECK-NEXT: ret i32 [[VECTOR_RECUR_EXTRACT_FOR_PHI]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-interleave-only.ll index fd19760..ebfe16b 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-interleave-only.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-interleave-only.ll @@ -22,21 +22,8 @@ define float @for_load_interleave_only(ptr %src) { ; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[SRC]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[FOR:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[L:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 16 -; CHECK-NEXT: [[L]] = load float, ptr [[PTR_IV]], align 4 -; CHECK-NEXT: store float 0.000000e+00, ptr [[PTR_IV]], align 4 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[FOR_LCSSA:%.*]] = phi float [ [[FOR]], %[[LOOP]] ], [ [[TMP2]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[FOR_LCSSA]] +; CHECK-NEXT: ret float [[TMP2]] ; entry: br 
label %loop diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll index 149157a..7412980 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll @@ -119,22 +119,7 @@ define void @test_pr54223_sink_after_insertion_order(ptr noalias %a, ptr noalias ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[FOR_1_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SCALAR_RECUR6:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[FOR_2_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[NEG:%.*]] = fneg float [[SCALAR_RECUR6]] -; CHECK-NEXT: [[MULADD:%.*]] = call float @llvm.fmuladd.f32(float [[SCALAR_RECUR]], float [[NEG]], float 0.000000e+00) -; CHECK-NEXT: [[DST_GEP:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[FOR_1_NEXT]] = load float, ptr [[A]], align 4 -; CHECK-NEXT: [[FOR_2_NEXT]] = load float, ptr [[B]], align 4 -; CHECK-NEXT: store float [[MULADD]], ptr [[DST_GEP]], align 4 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 10000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll index 443e44b..bd0c098 100644 --- 
a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll @@ -1193,19 +1193,9 @@ define i64 @constant_folded_previous_value() { ; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; UNROLL-NO-IC-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; UNROLL-NO-IC: middle.block: -; UNROLL-NO-IC-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NO-IC: scalar.ph: ; UNROLL-NO-IC-NEXT: br label [[SCALAR_BODY:%.*]] -; UNROLL-NO-IC: scalar.body: -; UNROLL-NO-IC-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[SCALAR_BODY]] ] -; UNROLL-NO-IC-NEXT: [[VAR2:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[VAR3:%.*]], [[SCALAR_BODY]] ] -; UNROLL-NO-IC-NEXT: [[VAR3]] = add i64 0, 1 -; UNROLL-NO-IC-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 -; UNROLL-NO-IC-NEXT: [[COND:%.*]] = icmp eq i64 [[I_NEXT]], 1000 -; UNROLL-NO-IC-NEXT: br i1 [[COND]], label [[FOR_END]], label [[SCALAR_BODY]] ; UNROLL-NO-IC: for.end: -; UNROLL-NO-IC-NEXT: [[VAR2_LCSSA:%.*]] = phi i64 [ [[VAR2]], [[SCALAR_BODY]] ], [ 1, [[MIDDLE_BLOCK]] ] -; UNROLL-NO-IC-NEXT: ret i64 [[VAR2_LCSSA]] +; UNROLL-NO-IC-NEXT: ret i64 1 ; ; UNROLL-NO-VF-LABEL: @constant_folded_previous_value( ; UNROLL-NO-VF-NEXT: entry: @@ -1218,19 +1208,9 @@ define i64 @constant_folded_previous_value() { ; UNROLL-NO-VF-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; UNROLL-NO-VF-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; UNROLL-NO-VF: middle.block: -; UNROLL-NO-VF-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NO-VF: scalar.ph: ; UNROLL-NO-VF-NEXT: br label [[SCALAR_BODY:%.*]] -; UNROLL-NO-VF: scalar.body: -; UNROLL-NO-VF-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[SCALAR_BODY]] ] -; UNROLL-NO-VF-NEXT: [[VAR2:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[VAR3:%.*]], [[SCALAR_BODY]] ] -; UNROLL-NO-VF-NEXT: [[VAR3]] 
= add i64 0, 1 -; UNROLL-NO-VF-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 -; UNROLL-NO-VF-NEXT: [[COND:%.*]] = icmp eq i64 [[I_NEXT]], 1000 -; UNROLL-NO-VF-NEXT: br i1 [[COND]], label [[FOR_END]], label [[SCALAR_BODY]] ; UNROLL-NO-VF: for.end: -; UNROLL-NO-VF-NEXT: [[VAR2_LCSSA:%.*]] = phi i64 [ [[VAR2]], [[SCALAR_BODY]] ], [ 1, [[MIDDLE_BLOCK]] ] -; UNROLL-NO-VF-NEXT: ret i64 [[VAR2_LCSSA]] +; UNROLL-NO-VF-NEXT: ret i64 1 ; ; SINK-AFTER-LABEL: @constant_folded_previous_value( ; SINK-AFTER-NEXT: entry: @@ -1243,19 +1223,9 @@ define i64 @constant_folded_previous_value() { ; SINK-AFTER-NEXT: [[TMP0:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; SINK-AFTER-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; SINK-AFTER: middle.block: -; SINK-AFTER-NEXT: br label [[FOR_END:%.*]] -; SINK-AFTER: scalar.ph: ; SINK-AFTER-NEXT: br label [[SCALAR_BODY:%.*]] -; SINK-AFTER: scalar.body: -; SINK-AFTER-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[SCALAR_BODY]] ] -; SINK-AFTER-NEXT: [[VAR2:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[VAR3:%.*]], [[SCALAR_BODY]] ] -; SINK-AFTER-NEXT: [[VAR3]] = add i64 0, 1 -; SINK-AFTER-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 -; SINK-AFTER-NEXT: [[COND:%.*]] = icmp eq i64 [[I_NEXT]], 1000 -; SINK-AFTER-NEXT: br i1 [[COND]], label [[FOR_END]], label [[SCALAR_BODY]] ; SINK-AFTER: for.end: -; SINK-AFTER-NEXT: [[VAR2_LCSSA:%.*]] = phi i64 [ [[VAR2]], [[SCALAR_BODY]] ], [ 1, [[MIDDLE_BLOCK]] ] -; SINK-AFTER-NEXT: ret i64 [[VAR2_LCSSA]] +; SINK-AFTER-NEXT: ret i64 1 ; entry: br label %scalar.body @@ -2725,21 +2695,9 @@ define i32 @sink_into_replication_region(i32 %y) { ; UNROLL-NO-IC: middle.block: ; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP49]], [[TMP48]] ; UNROLL-NO-IC-NEXT: [[TMP51:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; UNROLL-NO-IC-NEXT: br label [[BB1:%.*]] -; UNROLL-NO-IC: scalar.ph: ; UNROLL-NO-IC-NEXT: br label 
[[BB2:%.*]] ; UNROLL-NO-IC: bb1: -; UNROLL-NO-IC-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP51]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-IC-NEXT: ret i32 [[VAR]] -; UNROLL-NO-IC: bb2: -; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ] -; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] -; UNROLL-NO-IC-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] -; UNROLL-NO-IC-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1 -; UNROLL-NO-IC-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2 -; UNROLL-NO-IC-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27:![0-9]+]] +; UNROLL-NO-IC-NEXT: ret i32 [[TMP51]] ; ; UNROLL-NO-VF-LABEL: @sink_into_replication_region( ; UNROLL-NO-VF-NEXT: bb: @@ -2785,21 +2743,9 @@ define i32 @sink_into_replication_region(i32 %y) { ; UNROLL-NO-VF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF25:![0-9]+]], !llvm.loop [[LOOP26:![0-9]+]] ; UNROLL-NO-VF: middle.block: ; UNROLL-NO-VF-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP13]], [[TMP12]] -; UNROLL-NO-VF-NEXT: br label [[BB1:%.*]] -; UNROLL-NO-VF: scalar.ph: ; UNROLL-NO-VF-NEXT: br label [[BB2:%.*]] ; UNROLL-NO-VF: bb1: -; UNROLL-NO-VF-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-VF-NEXT: ret i32 [[VAR]] -; UNROLL-NO-VF: bb2: -; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ] -; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] -; UNROLL-NO-VF-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] -; UNROLL-NO-VF-NEXT: [[VAR8]] = add nsw i32 
[[VAR3]], -1 -; UNROLL-NO-VF-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2 -; UNROLL-NO-VF-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27:![0-9]+]] +; UNROLL-NO-VF-NEXT: ret i32 [[BIN_RDX]] ; ; SINK-AFTER-LABEL: @sink_into_replication_region( ; SINK-AFTER-NEXT: bb: @@ -2868,21 +2814,9 @@ define i32 @sink_into_replication_region(i32 %y) { ; SINK-AFTER-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF25:![0-9]+]], !llvm.loop [[LOOP26:![0-9]+]] ; SINK-AFTER: middle.block: ; SINK-AFTER-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP25]]) -; SINK-AFTER-NEXT: br label [[BB1:%.*]] -; SINK-AFTER: scalar.ph: ; SINK-AFTER-NEXT: br label [[BB2:%.*]] ; SINK-AFTER: bb1: -; SINK-AFTER-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP27]], [[MIDDLE_BLOCK]] ] -; SINK-AFTER-NEXT: ret i32 [[VAR]] -; SINK-AFTER: bb2: -; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ] -; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] -; SINK-AFTER-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] -; SINK-AFTER-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1 -; SINK-AFTER-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2 -; SINK-AFTER-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27:![0-9]+]] +; SINK-AFTER-NEXT: ret i32 [[TMP27]] ; bb: br label %bb2 @@ -3078,25 +3012,9 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) { ; UNROLL-NO-IC: middle.block: ; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP73]], [[TMP72]] ; UNROLL-NO-IC-NEXT: [[TMP75:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; UNROLL-NO-IC-NEXT: br label [[BB1:%.*]] -; UNROLL-NO-IC: scalar.ph: ; UNROLL-NO-IC-NEXT: br label [[BB2:%.*]] ; UNROLL-NO-IC: bb1: -; 
UNROLL-NO-IC-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP75]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-IC-NEXT: ret i32 [[VAR]] -; UNROLL-NO-IC: bb2: -; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ] -; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]] -; UNROLL-NO-IC-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] -; UNROLL-NO-IC-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] -; UNROLL-NO-IC-NEXT: store i32 [[VAR3]], ptr [[G]], align 4 -; UNROLL-NO-IC-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1 -; UNROLL-NO-IC-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 -; UNROLL-NO-IC-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2 -; UNROLL-NO-IC-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27]] +; UNROLL-NO-IC-NEXT: ret i32 [[TMP75]] ; ; UNROLL-NO-VF-LABEL: @sink_into_replication_region_multiple( ; UNROLL-NO-VF-NEXT: bb: @@ -3155,25 +3073,9 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) { ; UNROLL-NO-VF-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF25]], !llvm.loop [[LOOP28:![0-9]+]] ; UNROLL-NO-VF: middle.block: ; UNROLL-NO-VF-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP17]], [[TMP16]] -; UNROLL-NO-VF-NEXT: br label [[BB1:%.*]] -; UNROLL-NO-VF: scalar.ph: ; UNROLL-NO-VF-NEXT: br label [[BB2:%.*]] ; UNROLL-NO-VF: bb1: -; UNROLL-NO-VF-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-VF-NEXT: ret i32 [[VAR]] -; UNROLL-NO-VF: bb2: -; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ] -; UNROLL-NO-VF-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, 
[[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]] -; UNROLL-NO-VF-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] -; UNROLL-NO-VF-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] -; UNROLL-NO-VF-NEXT: store i32 [[VAR3]], ptr [[G]], align 4 -; UNROLL-NO-VF-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1 -; UNROLL-NO-VF-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 -; UNROLL-NO-VF-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2 -; UNROLL-NO-VF-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27]] +; UNROLL-NO-VF-NEXT: ret i32 [[BIN_RDX]] ; ; SINK-AFTER-LABEL: @sink_into_replication_region_multiple( ; SINK-AFTER-NEXT: bb: @@ -3273,25 +3175,9 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) { ; SINK-AFTER-NEXT: br i1 [[TMP38]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF25]], !llvm.loop [[LOOP28:![0-9]+]] ; SINK-AFTER: middle.block: ; SINK-AFTER-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP37]]) -; SINK-AFTER-NEXT: br label [[BB1:%.*]] -; SINK-AFTER: scalar.ph: ; SINK-AFTER-NEXT: br label [[BB2:%.*]] ; SINK-AFTER: bb1: -; SINK-AFTER-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP39]], [[MIDDLE_BLOCK]] ] -; SINK-AFTER-NEXT: ret i32 [[VAR]] -; SINK-AFTER: bb2: -; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ] -; SINK-AFTER-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]] -; SINK-AFTER-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] -; 
SINK-AFTER-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] -; SINK-AFTER-NEXT: store i32 [[VAR3]], ptr [[G]], align 4 -; SINK-AFTER-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1 -; SINK-AFTER-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 -; SINK-AFTER-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2 -; SINK-AFTER-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27]] +; SINK-AFTER-NEXT: ret i32 [[TMP39]] ; bb: br label %bb2 @@ -3341,26 +3227,9 @@ define i32 @sink_after_dead_inst(ptr %A.ptr, i32 %n) { ; UNROLL-NO-IC-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; UNROLL-NO-IC: middle.block: ; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[TMP3]], i32 2 -; UNROLL-NO-IC-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NO-IC: scalar.ph: ; UNROLL-NO-IC-NEXT: br label [[LOOP:%.*]] -; UNROLL-NO-IC: loop: -; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[FOR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[FOR_PREV:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[CMP:%.*]] = icmp eq i32 [[FOR]], 15 -; UNROLL-NO-IC-NEXT: [[C:%.*]] = icmp eq i1 [[CMP]], true -; UNROLL-NO-IC-NEXT: [[VEC_DEAD:%.*]] = and i1 [[C]], true -; UNROLL-NO-IC-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; UNROLL-NO-IC-NEXT: [[B1:%.*]] = or i16 [[IV_NEXT]], [[IV_NEXT]] -; UNROLL-NO-IC-NEXT: [[B3:%.*]] = and i1 [[CMP]], [[C]] -; UNROLL-NO-IC-NEXT: [[FOR_PREV]] = zext i16 [[B1]] to i32 -; UNROLL-NO-IC-NEXT: [[EXT:%.*]] = zext i1 [[B3]] to i32 -; UNROLL-NO-IC-NEXT: [[A_GEP:%.*]] = getelementptr i32, ptr [[A_PTR]], i16 [[IV]] -; UNROLL-NO-IC-NEXT: store i32 0, ptr [[A_GEP]], align 4 -; UNROLL-NO-IC-NEXT: br i1 [[VEC_DEAD]], label [[FOR_END]], label [[LOOP]] ; UNROLL-NO-IC: for.end: -; UNROLL-NO-IC-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], [[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-IC-NEXT: ret i32 [[FOR_LCSSA]] +; 
UNROLL-NO-IC-NEXT: ret i32 [[VECTOR_RECUR_EXTRACT_FOR_PHI]] ; ; UNROLL-NO-VF-LABEL: @sink_after_dead_inst( ; UNROLL-NO-VF-NEXT: entry: @@ -3382,26 +3251,9 @@ define i32 @sink_after_dead_inst(ptr %A.ptr, i32 %n) { ; UNROLL-NO-VF-NEXT: [[TMP11:%.*]] = icmp eq i32 [[TMP7]], 16 ; UNROLL-NO-VF-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; UNROLL-NO-VF: middle.block: -; UNROLL-NO-VF-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NO-VF: scalar.ph: ; UNROLL-NO-VF-NEXT: br label [[LOOP:%.*]] -; UNROLL-NO-VF: loop: -; UNROLL-NO-VF-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; UNROLL-NO-VF-NEXT: [[FOR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[FOR_PREV:%.*]], [[LOOP]] ] -; UNROLL-NO-VF-NEXT: [[CMP:%.*]] = icmp eq i32 [[FOR]], 15 -; UNROLL-NO-VF-NEXT: [[C:%.*]] = icmp eq i1 [[CMP]], true -; UNROLL-NO-VF-NEXT: [[VEC_DEAD:%.*]] = and i1 [[C]], true -; UNROLL-NO-VF-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; UNROLL-NO-VF-NEXT: [[B1:%.*]] = or i16 [[IV_NEXT]], [[IV_NEXT]] -; UNROLL-NO-VF-NEXT: [[B3:%.*]] = and i1 [[CMP]], [[C]] -; UNROLL-NO-VF-NEXT: [[FOR_PREV]] = zext i16 [[B1]] to i32 -; UNROLL-NO-VF-NEXT: [[EXT:%.*]] = zext i1 [[B3]] to i32 -; UNROLL-NO-VF-NEXT: [[A_GEP:%.*]] = getelementptr i32, ptr [[A_PTR]], i16 [[IV]] -; UNROLL-NO-VF-NEXT: store i32 0, ptr [[A_GEP]], align 4 -; UNROLL-NO-VF-NEXT: br i1 [[VEC_DEAD]], label [[FOR_END]], label [[LOOP]] ; UNROLL-NO-VF: for.end: -; UNROLL-NO-VF-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], [[LOOP]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-VF-NEXT: ret i32 [[FOR_LCSSA]] +; UNROLL-NO-VF-NEXT: ret i32 [[TMP10]] ; ; SINK-AFTER-LABEL: @sink_after_dead_inst( ; SINK-AFTER-NEXT: entry: @@ -3423,26 +3275,9 @@ define i32 @sink_after_dead_inst(ptr %A.ptr, i32 %n) { ; SINK-AFTER-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; SINK-AFTER: middle.block: ; SINK-AFTER-NEXT: 
[[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[TMP3]], i32 2 -; SINK-AFTER-NEXT: br label [[FOR_END:%.*]] -; SINK-AFTER: scalar.ph: ; SINK-AFTER-NEXT: br label [[LOOP:%.*]] -; SINK-AFTER: loop: -; SINK-AFTER-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; SINK-AFTER-NEXT: [[FOR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[FOR_PREV:%.*]], [[LOOP]] ] -; SINK-AFTER-NEXT: [[CMP:%.*]] = icmp eq i32 [[FOR]], 15 -; SINK-AFTER-NEXT: [[C:%.*]] = icmp eq i1 [[CMP]], true -; SINK-AFTER-NEXT: [[VEC_DEAD:%.*]] = and i1 [[C]], true -; SINK-AFTER-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; SINK-AFTER-NEXT: [[B1:%.*]] = or i16 [[IV_NEXT]], [[IV_NEXT]] -; SINK-AFTER-NEXT: [[B3:%.*]] = and i1 [[CMP]], [[C]] -; SINK-AFTER-NEXT: [[FOR_PREV]] = zext i16 [[B1]] to i32 -; SINK-AFTER-NEXT: [[EXT:%.*]] = zext i1 [[B3]] to i32 -; SINK-AFTER-NEXT: [[A_GEP:%.*]] = getelementptr i32, ptr [[A_PTR]], i16 [[IV]] -; SINK-AFTER-NEXT: store i32 0, ptr [[A_GEP]], align 4 -; SINK-AFTER-NEXT: br i1 [[VEC_DEAD]], label [[FOR_END]], label [[LOOP]] ; SINK-AFTER: for.end: -; SINK-AFTER-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], [[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], [[MIDDLE_BLOCK]] ] -; SINK-AFTER-NEXT: ret i32 [[FOR_LCSSA]] +; SINK-AFTER-NEXT: ret i32 [[VECTOR_RECUR_EXTRACT_FOR_PHI]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/flags.ll b/llvm/test/Transforms/LoopVectorize/flags.ll index cb86f5f..2268085 100644 --- a/llvm/test/Transforms/LoopVectorize/flags.ll +++ b/llvm/test/Transforms/LoopVectorize/flags.ll @@ -129,20 +129,8 @@ define float @fast_math(ptr noalias %s) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP3:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP1]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], 
%[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[S]], i64 [[IV]] -; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = fadd fast float [[RED]], [[TMP4]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 256 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[LOOP]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[ADD_LCSSA]] +; CHECK-NEXT: ret float [[TMP3]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/float-induction.ll b/llvm/test/Transforms/LoopVectorize/float-induction.ll index 901f67e..f56699a 100644 --- a/llvm/test/Transforms/LoopVectorize/float-induction.ll +++ b/llvm/test/Transforms/LoopVectorize/float-induction.ll @@ -1649,11 +1649,7 @@ define i32 @float_induction_with_dbg_on_fadd(ptr %dst) { ; VEC4_INTERL1-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; VEC4_INTERL1-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; VEC4_INTERL1: middle.block: -; VEC4_INTERL1-NEXT: br label [[EXIT:%.*]] -; VEC4_INTERL1: scalar.ph: ; VEC4_INTERL1-NEXT: br label [[LOOP:%.*]] -; VEC4_INTERL1: loop: -; VEC4_INTERL1-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; VEC4_INTERL1: exit: ; VEC4_INTERL1-NEXT: ret i32 0 ; @@ -1672,11 +1668,7 @@ define i32 @float_induction_with_dbg_on_fadd(ptr %dst) { ; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; VEC4_INTERL2-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; VEC4_INTERL2: middle.block: -; VEC4_INTERL2-NEXT: br label [[EXIT:%.*]] -; VEC4_INTERL2: scalar.ph: ; VEC4_INTERL2-NEXT: br label [[LOOP:%.*]] -; VEC4_INTERL2: loop: -; 
VEC4_INTERL2-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; VEC4_INTERL2: exit: ; VEC4_INTERL2-NEXT: ret i32 0 ; @@ -1699,11 +1691,7 @@ define i32 @float_induction_with_dbg_on_fadd(ptr %dst) { ; VEC1_INTERL2-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; VEC1_INTERL2-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; VEC1_INTERL2: middle.block: -; VEC1_INTERL2-NEXT: br label [[EXIT:%.*]] -; VEC1_INTERL2: scalar.ph: ; VEC1_INTERL2-NEXT: br label [[LOOP:%.*]] -; VEC1_INTERL2: loop: -; VEC1_INTERL2-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; VEC1_INTERL2: exit: ; VEC1_INTERL2-NEXT: ret i32 0 ; diff --git a/llvm/test/Transforms/LoopVectorize/float-minmax-instruction-flag.ll b/llvm/test/Transforms/LoopVectorize/float-minmax-instruction-flag.ll index 93031c7..555e695 100644 --- a/llvm/test/Transforms/LoopVectorize/float-minmax-instruction-flag.ll +++ b/llvm/test/Transforms/LoopVectorize/float-minmax-instruction-flag.ll @@ -66,22 +66,9 @@ define float @minloopattr(ptr nocapture readonly %arg) #0 { ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> [[TMP4]]) -; CHECK-NEXT: br label [[OUT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[T1:%.*]] = phi i64 [ [[T7:%.*]], [[LOOP]] ], [ 1, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[T2:%.*]] = phi float [ [[T6:%.*]], [[LOOP]] ], [ [[T]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[T3:%.*]] = getelementptr float, ptr [[ARG]], i64 [[T1]] -; CHECK-NEXT: [[T4:%.*]] = load float, ptr [[T3]], align 4 -; CHECK-NEXT: [[T5:%.*]] = fcmp olt float [[T2]], [[T4]] -; CHECK-NEXT: [[T6]] = select i1 [[T5]], float [[T2]], float [[T4]] -; CHECK-NEXT: [[T7]] = add i64 [[T1]], 1 -; CHECK-NEXT: [[T8:%.*]] = icmp eq i64 [[T7]], 65537 -; CHECK-NEXT: br i1 [[T8]], label [[OUT]], label 
[[LOOP]] ; CHECK: out: -; CHECK-NEXT: [[T6_LCSSA:%.*]] = phi float [ [[T6]], [[LOOP]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[T6_LCSSA]] +; CHECK-NEXT: ret float [[TMP6]] ; top: %t = load float, ptr %arg diff --git a/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll b/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll index c86e271..f7376a0 100644 --- a/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll +++ b/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll @@ -67,23 +67,7 @@ define i32 @test(ptr nocapture %f) #0 { ; UNROLL-NOSIMPLIFY-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; UNROLL-NOSIMPLIFY: middle.block: -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NOSIMPLIFY: scalar.ph: -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_BODY:%.*]] -; UNROLL-NOSIMPLIFY: for.body: -; UNROLL-NOSIMPLIFY-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; UNROLL-NOSIMPLIFY-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[INDVARS_IV]] -; UNROLL-NOSIMPLIFY-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; UNROLL-NOSIMPLIFY-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP11]], 100 -; UNROLL-NOSIMPLIFY-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; UNROLL-NOSIMPLIFY: if.then: -; UNROLL-NOSIMPLIFY-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], 20 -; UNROLL-NOSIMPLIFY-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC]] -; UNROLL-NOSIMPLIFY: for.inc: -; UNROLL-NOSIMPLIFY-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; UNROLL-NOSIMPLIFY-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 128 -; UNROLL-NOSIMPLIFY-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]] +; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC:%.*]] ; UNROLL-NOSIMPLIFY: 
for.end: ; UNROLL-NOSIMPLIFY-NEXT: ret i32 0 ; @@ -449,25 +433,7 @@ define void @minimal_bit_widths(i1 %c) { ; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; UNROLL-NOSIMPLIFY: middle.block: -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NOSIMPLIFY: scalar.ph: -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_BODY:%.*]] -; UNROLL-NOSIMPLIFY: for.body: -; UNROLL-NOSIMPLIFY-NEXT: [[TMP1:%.*]] = phi i64 [ [[TMP9:%.*]], [[FOR_INC:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ] -; UNROLL-NOSIMPLIFY-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ 1000, [[SCALAR_PH]] ] -; UNROLL-NOSIMPLIFY-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr undef, i64 [[TMP1]] -; UNROLL-NOSIMPLIFY-NEXT: [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1 -; UNROLL-NOSIMPLIFY-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; UNROLL-NOSIMPLIFY: if.then: -; UNROLL-NOSIMPLIFY-NEXT: [[TMP5:%.*]] = zext i8 [[TMP4]] to i32 -; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i8 -; UNROLL-NOSIMPLIFY-NEXT: store i8 [[TMP6]], ptr [[TMP3]], align 1 -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC]] -; UNROLL-NOSIMPLIFY: for.inc: -; UNROLL-NOSIMPLIFY-NEXT: [[TMP9]] = add nuw nsw i64 [[TMP1]], 1 -; UNROLL-NOSIMPLIFY-NEXT: [[TMP7]] = add i64 [[TMP2]], -1 -; UNROLL-NOSIMPLIFY-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0 -; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]] +; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC:%.*]] ; UNROLL-NOSIMPLIFY: for.end: ; UNROLL-NOSIMPLIFY-NEXT: ret void ; @@ -575,26 +541,7 @@ define void @minimal_bit_widths_with_aliasing_store(i1 %c, ptr %ptr) { ; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; UNROLL-NOSIMPLIFY: middle.block: -; 
UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NOSIMPLIFY: scalar.ph: -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_BODY:%.*]] -; UNROLL-NOSIMPLIFY: for.body: -; UNROLL-NOSIMPLIFY-NEXT: [[TMP1:%.*]] = phi i64 [ [[TMP9:%.*]], [[FOR_INC:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ] -; UNROLL-NOSIMPLIFY-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ 1000, [[SCALAR_PH]] ] -; UNROLL-NOSIMPLIFY-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP1]] -; UNROLL-NOSIMPLIFY-NEXT: [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1 -; UNROLL-NOSIMPLIFY-NEXT: store i8 0, ptr [[TMP3]], align 1 -; UNROLL-NOSIMPLIFY-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; UNROLL-NOSIMPLIFY: if.then: -; UNROLL-NOSIMPLIFY-NEXT: [[TMP5:%.*]] = zext i8 [[TMP4]] to i32 -; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i8 -; UNROLL-NOSIMPLIFY-NEXT: store i8 [[TMP6]], ptr [[TMP3]], align 1 -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC]] -; UNROLL-NOSIMPLIFY: for.inc: -; UNROLL-NOSIMPLIFY-NEXT: [[TMP9]] = add nuw nsw i64 [[TMP1]], 1 -; UNROLL-NOSIMPLIFY-NEXT: [[TMP7]] = add i64 [[TMP2]], -1 -; UNROLL-NOSIMPLIFY-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0 -; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]] +; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC:%.*]] ; UNROLL-NOSIMPLIFY: for.end: ; UNROLL-NOSIMPLIFY-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/induction-multiple-uses-in-same-instruction.ll b/llvm/test/Transforms/LoopVectorize/induction-multiple-uses-in-same-instruction.ll index f0b32c6..ccf05d7 100644 --- a/llvm/test/Transforms/LoopVectorize/induction-multiple-uses-in-same-instruction.ll +++ b/llvm/test/Transforms/LoopVectorize/induction-multiple-uses-in-same-instruction.ll @@ -24,17 +24,7 @@ define void @multiple_iv_uses_in_same_instruction(ptr %ptr) { ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [100 x [100 x i32]], ptr [[PTR]], i64 0, i64 [[IV]], i64 [[IV]] -; CHECK-NEXT: [[T:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-NEXT: store i32 [[T]], ptr [[GEP]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/induction-step.ll b/llvm/test/Transforms/LoopVectorize/induction-step.ll index 362de0e..53d5ac4 100644 --- a/llvm/test/Transforms/LoopVectorize/induction-step.ll +++ b/llvm/test/Transforms/LoopVectorize/induction-step.ll @@ -291,18 +291,6 @@ define void @iv_no_binary_op_in_descriptor(i1 %c, ptr %dst) { ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT_P:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i64 [[IV]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT:%.*]] = add i64 [[IV]], 1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV_NEXT_P]] = phi i64 [ [[IV_NEXT]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT_P]], 1000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git 
a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll index 60c844c..cc55a51 100644 --- a/llvm/test/Transforms/LoopVectorize/induction.ll +++ b/llvm/test/Transforms/LoopVectorize/induction.ll @@ -2764,19 +2764,9 @@ define i32 @i8_loop() nounwind readnone ssp uwtable { ; CHECK-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> [[TMP0]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[A_0:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[A_0_AND:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[B_0:%.*]] = phi i8 [ 0, [[SCALAR_PH]] ], [ [[B_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[A_0_AND]] = and i32 [[A_0]], 4 -; CHECK-NEXT: [[B_NEXT]] = add i8 [[B_0]], -1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i8 [[B_NEXT]], 0 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[A_0_AND_LCSSA:%.*]] = phi i32 [ [[A_0_AND]], [[LOOP]] ], [ [[TMP2]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[A_0_AND_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP2]] ; ; IND-LABEL: @i8_loop( ; IND-NEXT: entry: @@ -2789,11 +2779,7 @@ define i32 @i8_loop() nounwind readnone ssp uwtable { ; IND-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; IND-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; IND: middle.block: -; IND-NEXT: br label [[EXIT:%.*]] -; IND: scalar.ph: ; IND-NEXT: br label [[LOOP:%.*]] -; IND: loop: -; IND-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; IND: exit: ; IND-NEXT: ret i32 0 ; @@ -2808,11 +2794,7 @@ define i32 @i8_loop() nounwind readnone ssp uwtable { ; UNROLL-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; UNROLL-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; UNROLL: 
middle.block: -; UNROLL-NEXT: br label [[EXIT:%.*]] -; UNROLL: scalar.ph: ; UNROLL-NEXT: br label [[LOOP:%.*]] -; UNROLL: loop: -; UNROLL-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; UNROLL: exit: ; UNROLL-NEXT: ret i32 0 ; @@ -2833,19 +2815,9 @@ define i32 @i8_loop() nounwind readnone ssp uwtable { ; UNROLL-NO-IC: middle.block: ; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = and <2 x i32> [[TMP1]], [[TMP0]] ; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> [[BIN_RDX]]) -; UNROLL-NO-IC-NEXT: br label [[EXIT:%.*]] -; UNROLL-NO-IC: scalar.ph: ; UNROLL-NO-IC-NEXT: br label [[LOOP:%.*]] -; UNROLL-NO-IC: loop: -; UNROLL-NO-IC-NEXT: [[A_0:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[A_0_AND:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[B_0:%.*]] = phi i8 [ 0, [[SCALAR_PH]] ], [ [[B_NEXT:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[A_0_AND]] = and i32 [[A_0]], 4 -; UNROLL-NO-IC-NEXT: [[B_NEXT]] = add i8 [[B_0]], -1 -; UNROLL-NO-IC-NEXT: [[EC:%.*]] = icmp eq i8 [[B_NEXT]], 0 -; UNROLL-NO-IC-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; UNROLL-NO-IC: exit: -; UNROLL-NO-IC-NEXT: [[A_0_AND_LCSSA:%.*]] = phi i32 [ [[A_0_AND]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-IC-NEXT: ret i32 [[A_0_AND_LCSSA]] +; UNROLL-NO-IC-NEXT: ret i32 [[TMP3]] ; ; INTERLEAVE-LABEL: @i8_loop( ; INTERLEAVE-NEXT: entry: @@ -2858,11 +2830,7 @@ define i32 @i8_loop() nounwind readnone ssp uwtable { ; INTERLEAVE-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; INTERLEAVE-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; INTERLEAVE: middle.block: -; INTERLEAVE-NEXT: br label [[EXIT:%.*]] -; INTERLEAVE: scalar.ph: ; INTERLEAVE-NEXT: br label [[LOOP:%.*]] -; INTERLEAVE: loop: -; INTERLEAVE-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; INTERLEAVE: exit: ; INTERLEAVE-NEXT: ret i32 0 ; @@ -2897,19 +2865,9 @@ define i32 @i16_loop() nounwind readnone ssp uwtable { ; CHECK-NEXT: br i1 [[TMP1]], 
label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> [[TMP0]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[A_0:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[A_0_AND:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[B_0:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[B_0_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[A_0_AND]] = and i32 [[A_0]], 4 -; CHECK-NEXT: [[B_0_NEXT]] = add i16 [[B_0]], -1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i16 [[B_0_NEXT]], 0 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[A_0_AND_LCSSA:%.*]] = phi i32 [ [[A_0_AND]], [[LOOP]] ], [ [[TMP2]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[A_0_AND_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP2]] ; ; IND-LABEL: @i16_loop( ; IND-NEXT: entry: @@ -2922,11 +2880,7 @@ define i32 @i16_loop() nounwind readnone ssp uwtable { ; IND-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 65536 ; IND-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; IND: middle.block: -; IND-NEXT: br label [[EXIT:%.*]] -; IND: scalar.ph: ; IND-NEXT: br label [[LOOP:%.*]] -; IND: loop: -; IND-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; IND: exit: ; IND-NEXT: ret i32 0 ; @@ -2941,11 +2895,7 @@ define i32 @i16_loop() nounwind readnone ssp uwtable { ; UNROLL-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 65536 ; UNROLL-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; UNROLL: middle.block: -; UNROLL-NEXT: br label [[EXIT:%.*]] -; UNROLL: scalar.ph: ; UNROLL-NEXT: br label [[LOOP:%.*]] -; UNROLL: loop: -; UNROLL-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; UNROLL: exit: ; UNROLL-NEXT: ret i32 0 ; @@ -2966,19 +2916,9 @@ define i32 @i16_loop() nounwind readnone ssp uwtable { ; UNROLL-NO-IC: 
middle.block: ; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = and <2 x i32> [[TMP1]], [[TMP0]] ; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> [[BIN_RDX]]) -; UNROLL-NO-IC-NEXT: br label [[EXIT:%.*]] -; UNROLL-NO-IC: scalar.ph: ; UNROLL-NO-IC-NEXT: br label [[LOOP:%.*]] -; UNROLL-NO-IC: loop: -; UNROLL-NO-IC-NEXT: [[A_0:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[A_0_AND:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[B_0:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[B_0_NEXT:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[A_0_AND]] = and i32 [[A_0]], 4 -; UNROLL-NO-IC-NEXT: [[B_0_NEXT]] = add i16 [[B_0]], -1 -; UNROLL-NO-IC-NEXT: [[EC:%.*]] = icmp eq i16 [[B_0_NEXT]], 0 -; UNROLL-NO-IC-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; UNROLL-NO-IC: exit: -; UNROLL-NO-IC-NEXT: [[A_0_AND_LCSSA:%.*]] = phi i32 [ [[A_0_AND]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-IC-NEXT: ret i32 [[A_0_AND_LCSSA]] +; UNROLL-NO-IC-NEXT: ret i32 [[TMP3]] ; ; INTERLEAVE-LABEL: @i16_loop( ; INTERLEAVE-NEXT: entry: @@ -2991,11 +2931,7 @@ define i32 @i16_loop() nounwind readnone ssp uwtable { ; INTERLEAVE-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 65536 ; INTERLEAVE-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; INTERLEAVE: middle.block: -; INTERLEAVE-NEXT: br label [[EXIT:%.*]] -; INTERLEAVE: scalar.ph: ; INTERLEAVE-NEXT: br label [[LOOP:%.*]] -; INTERLEAVE: loop: -; INTERLEAVE-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; INTERLEAVE: exit: ; INTERLEAVE-NEXT: ret i32 0 ; @@ -5025,28 +4961,9 @@ define i32 @PR32419(i32 %a, i16 %b) { ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP15]]) -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: 
[[I:%.*]] = phi i32 [ -20, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-NEXT: [[VAR0:%.*]] = phi i32 [ [[A]], [[SCALAR_PH]] ], [ [[VAR6:%.*]], [[FOR_INC]] ] -; CHECK-NEXT: [[VAR1:%.*]] = trunc i32 [[I]] to i16 -; CHECK-NEXT: [[VAR2:%.*]] = icmp eq i16 [[VAR1]], 0 -; CHECK-NEXT: br i1 [[VAR2]], label [[FOR_INC]], label [[FOR_COND:%.*]] -; CHECK: for.cond: -; CHECK-NEXT: [[VAR3:%.*]] = urem i16 [[B]], [[VAR1]] -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: [[VAR4:%.*]] = phi i16 [ [[VAR3]], [[FOR_COND]] ], [ 0, [[FOR_BODY]] ] -; CHECK-NEXT: [[VAR5:%.*]] = sext i16 [[VAR4]] to i32 -; CHECK-NEXT: [[VAR6]] = or i32 [[VAR0]], [[VAR5]] -; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[I_NEXT]], 0 -; CHECK-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: for.end: -; CHECK-NEXT: [[VAR7:%.*]] = phi i32 [ [[VAR6]], [[FOR_INC]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[VAR7]] +; CHECK-NEXT: ret i32 [[TMP17]] ; ; IND-LABEL: @PR32419( ; IND-NEXT: entry: @@ -5086,15 +5003,7 @@ define i32 @PR32419(i32 %a, i16 %b) { ; IND-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], 20 ; IND-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]] ; IND: middle.block: -; IND-NEXT: br label [[FOR_END:%.*]] -; IND: scalar.ph: -; IND-NEXT: br label [[FOR_BODY:%.*]] -; IND: for.body: -; IND-NEXT: br i1 poison, label [[FOR_INC:%.*]], label [[FOR_COND:%.*]] -; IND: for.cond: -; IND-NEXT: br label [[FOR_INC]] -; IND: for.inc: -; IND-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] +; IND-NEXT: br label [[FOR_INC:%.*]] ; IND: for.end: ; IND-NEXT: [[VAR7:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP14]]) ; IND-NEXT: ret i32 [[VAR7]] @@ -5160,15 +5069,7 @@ define i32 @PR32419(i32 %a, i16 %b) { ; UNROLL-NEXT: [[TMP28:%.*]] = icmp eq i32 [[INDEX_NEXT]], 20 ; UNROLL-NEXT: 
br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]] ; UNROLL: middle.block: -; UNROLL-NEXT: br label [[FOR_END:%.*]] -; UNROLL: scalar.ph: -; UNROLL-NEXT: br label [[FOR_BODY:%.*]] -; UNROLL: for.body: -; UNROLL-NEXT: br i1 poison, label [[FOR_INC:%.*]], label [[FOR_COND:%.*]] -; UNROLL: for.cond: -; UNROLL-NEXT: br label [[FOR_INC]] -; UNROLL: for.inc: -; UNROLL-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] +; UNROLL-NEXT: br label [[FOR_INC:%.*]] ; UNROLL: for.end: ; UNROLL-NEXT: [[BIN_RDX:%.*]] = or <2 x i32> [[TMP27]], [[TMP26]] ; UNROLL-NEXT: [[VAR7:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[BIN_RDX]]) @@ -5239,28 +5140,9 @@ define i32 @PR32419(i32 %a, i16 %b) { ; UNROLL-NO-IC: middle.block: ; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = or <2 x i32> [[TMP29]], [[TMP28]] ; UNROLL-NO-IC-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[BIN_RDX]]) -; UNROLL-NO-IC-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NO-IC: scalar.ph: -; UNROLL-NO-IC-NEXT: br label [[FOR_BODY:%.*]] -; UNROLL-NO-IC: for.body: -; UNROLL-NO-IC-NEXT: [[I:%.*]] = phi i32 [ -20, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_INC:%.*]] ] -; UNROLL-NO-IC-NEXT: [[VAR0:%.*]] = phi i32 [ [[A]], [[SCALAR_PH]] ], [ [[VAR6:%.*]], [[FOR_INC]] ] -; UNROLL-NO-IC-NEXT: [[VAR1:%.*]] = trunc i32 [[I]] to i16 -; UNROLL-NO-IC-NEXT: [[VAR2:%.*]] = icmp eq i16 [[VAR1]], 0 -; UNROLL-NO-IC-NEXT: br i1 [[VAR2]], label [[FOR_INC]], label [[FOR_COND:%.*]] -; UNROLL-NO-IC: for.cond: -; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = urem i16 [[B]], [[VAR1]] -; UNROLL-NO-IC-NEXT: br label [[FOR_INC]] -; UNROLL-NO-IC: for.inc: -; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i16 [ [[VAR3]], [[FOR_COND]] ], [ 0, [[FOR_BODY]] ] -; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = sext i16 [[VAR4]] to i32 -; UNROLL-NO-IC-NEXT: [[VAR6]] = or i32 [[VAR0]], [[VAR5]] -; UNROLL-NO-IC-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1 -; UNROLL-NO-IC-NEXT: [[COND:%.*]] = icmp eq i32 [[I_NEXT]], 0 
-; UNROLL-NO-IC-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]] +; UNROLL-NO-IC-NEXT: br label [[FOR_INC:%.*]] ; UNROLL-NO-IC: for.end: -; UNROLL-NO-IC-NEXT: [[VAR7:%.*]] = phi i32 [ [[VAR6]], [[FOR_INC]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-IC-NEXT: ret i32 [[VAR7]] +; UNROLL-NO-IC-NEXT: ret i32 [[TMP31]] ; ; INTERLEAVE-LABEL: @PR32419( ; INTERLEAVE-NEXT: entry: @@ -5818,23 +5700,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr % ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[TRUNC_IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[TRUNC_IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RECUR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[SRC]], align 4 -; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[RECUR]] -; CHECK-NEXT: [[TRUNC_IV_NEXT]] = add i32 [[TRUNC_IV]], 1 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[IV_TRUNC]] = trunc i64 [[IV]] to i32 -; CHECK-NEXT: [[DST_GEP:%.*]] = getelementptr i32, ptr [[DST]], i32 [[IV_TRUNC]] -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[IV_TRUNC]], [[MUL]] -; CHECK-NEXT: store i32 [[ADD]], ptr [[DST_GEP]], align 4 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[TRUNC_IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -5862,11 +5728,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr % ; IND-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; IND-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP47:![0-9]+]] ; IND: middle.block: -; IND-NEXT: br label [[EXIT:%.*]] -; IND: scalar.ph: ; IND-NEXT: br label [[LOOP:%.*]] -; IND: loop: -; IND-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; IND: exit: ; IND-NEXT: ret void ; @@ -5900,11 +5762,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr % ; UNROLL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; UNROLL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]] ; UNROLL: middle.block: -; UNROLL-NEXT: br label [[EXIT:%.*]] -; UNROLL: scalar.ph: ; UNROLL-NEXT: br label [[LOOP:%.*]] -; UNROLL: loop: -; UNROLL-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; UNROLL: exit: ; UNROLL-NEXT: ret void ; @@ -5937,23 +5795,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr % ; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; UNROLL-NO-IC-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]] ; UNROLL-NO-IC: middle.block: -; UNROLL-NO-IC-NEXT: br label [[EXIT:%.*]] -; UNROLL-NO-IC: scalar.ph: ; UNROLL-NO-IC-NEXT: br label [[LOOP:%.*]] -; UNROLL-NO-IC: loop: -; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[TRUNC_IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[TRUNC_IV_NEXT:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[RECUR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[LV:%.*]] = load i32, ptr [[SRC]], align 4 -; UNROLL-NO-IC-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[RECUR]] -; UNROLL-NO-IC-NEXT: [[TRUNC_IV_NEXT]] = add i32 [[TRUNC_IV]], 1 -; UNROLL-NO-IC-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; UNROLL-NO-IC-NEXT: [[IV_TRUNC]] = trunc i64 [[IV]] to i32 -; UNROLL-NO-IC-NEXT: [[DST_GEP:%.*]] = getelementptr i32, ptr [[DST]], i32 [[IV_TRUNC]] -; UNROLL-NO-IC-NEXT: [[ADD:%.*]] = add i32 [[IV_TRUNC]], [[MUL]] 
-; UNROLL-NO-IC-NEXT: store i32 [[ADD]], ptr [[DST_GEP]], align 4 -; UNROLL-NO-IC-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[TRUNC_IV_NEXT]], 100 -; UNROLL-NO-IC-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]] ; UNROLL-NO-IC: exit: ; UNROLL-NO-IC-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/instruction-only-used-outside-of-loop.ll b/llvm/test/Transforms/LoopVectorize/instruction-only-used-outside-of-loop.ll index 9222af9..8975c05 100644 --- a/llvm/test/Transforms/LoopVectorize/instruction-only-used-outside-of-loop.ll +++ b/llvm/test/Transforms/LoopVectorize/instruction-only-used-outside-of-loop.ll @@ -18,23 +18,9 @@ define i32 @one_direct_branch(ptr %src) { ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP3]], i32 3 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[SRC_GEP]], align 4 -; CHECK-NEXT: [[XOR:%.*]] = xor i32 25500, [[LV]] -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[PHI_XOR:%.*]] = phi i32 [ [[XOR]], [[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 -; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[PHI_XOR]], [[LOOP_LATCH]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[XOR_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP5]] ; entry: br label %loop @@ -73,26 +59,9 @@ define i32 @two_direct_branch(ptr %src) { ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP3]], i32 3 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[SRC_GEP]], align 4 -; CHECK-NEXT: [[XOR:%.*]] = xor i32 25500, [[LV]] -; CHECK-NEXT: br label [[BB:%.*]] -; CHECK: bb: -; CHECK-NEXT: [[PHI_XOR_1:%.*]] = phi i32 [ [[XOR]], [[LOOP]] ] -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[PHI_XOR:%.*]] = phi i32 [ [[PHI_XOR_1]], [[BB]] ] -; CHECK-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 -; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[PHI_XOR]], [[LOOP_LATCH]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[XOR_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP5]] ; entry: br label %loop @@ -141,26 +110,9 @@ define i32 @cond_branch(i32 %a, ptr %src) { ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[PREDPHI]], i32 3 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[SRC_GEP]], align 4 -; CHECK-NEXT: [[XOR:%.*]] = xor i32 25500, [[LV]] -; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[IV]], [[A]] -; CHECK-NEXT: br i1 
[[CMP]], label [[LOOP_LATCH]], label [[THEN:%.*]] -; CHECK: then: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[PHI_XOR:%.*]] = phi i32 [ [[XOR]], [[LOOP]] ], [ 10, [[THEN]] ] -; CHECK-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 -; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[PHI_XOR]], [[LOOP_LATCH]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[XOR_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP7]] ; entry: br label %loop @@ -205,18 +157,9 @@ define i32 @optimizable_trunc_used_outside() { ; CHECK-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i32> [[VEC_IND]], i32 3 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT_I_I:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT_I_I]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[IV_TRUNC_LCSSA:%.*]] = phi i32 [ [[IV_TRUNC]], [[LOOP]] ], [ [[TMP1]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[IV_TRUNC_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP1]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/interleave-with-i65-induction.ll b/llvm/test/Transforms/LoopVectorize/interleave-with-i65-induction.ll index 1128dd3..2c97bb7 100644 --- a/llvm/test/Transforms/LoopVectorize/interleave-with-i65-induction.ll +++ b/llvm/test/Transforms/LoopVectorize/interleave-with-i65-induction.ll @@ -33,19 +33,6 @@ define void 
@i65_induction_with_negative_step(ptr %dst) { ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_I65:%.*]] = phi i65 [ 0, %[[SCALAR_PH]] ], [ [[IV_I65_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[FOR:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TRUNC:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[TRUNC]] = trunc i65 [[IV_I65]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TRUNC]] -; CHECK-NEXT: store i64 [[FOR]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: [[IV_I65_NEXT]] = add i65 [[IV_I65]], -1 -; CHECK-NEXT: br i1 [[ICMP]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-different-insert-position.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-different-insert-position.ll index 85e7477..eca9c1f 100644 --- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-different-insert-position.ll +++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-different-insert-position.ll @@ -27,23 +27,6 @@ define void @gep_for_first_member_does_not_dominate_insert_point(ptr %str, ptr n ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV2:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV2_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: 
[[OR_1:%.*]] = or disjoint i64 [[IV2]], 1 -; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i8, ptr [[STR]], i64 [[OR_1]] -; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[GEP1]], align 1 -; CHECK-NEXT: [[GEP0:%.*]] = getelementptr i8, ptr [[STR]], i64 [[IV2]] -; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr [[GEP0]], align 1 -; CHECK-NEXT: [[ADD:%.*]] = add i8 [[TMP9]], [[TMP10]] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i8 [[ADD]], ptr [[GEP_DST]], align 1 -; CHECK-NEXT: [[IV2_NEXT]] = add i64 [[IV2]], 2 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-metadata.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-metadata.ll index 4dc9cfd..bd0fd77 100644 --- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-metadata.ll +++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-metadata.ll @@ -45,23 +45,6 @@ define void @merge_tbaa_interleave_group(ptr nocapture readonly %p, ptr noalias ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_VEC4R]], ptr [[P]], i64 [[IV]], i32 0 -; CHECK-NEXT: [[TMP19:%.*]] = load double, ptr [[X]], align 8, !tbaa [[TBAA0]] -; CHECK-NEXT: [[MUL:%.*]] = fmul double [[TMP19]], 2.000000e+00 -; CHECK-NEXT: [[X4:%.*]] = getelementptr inbounds [20 x %struct.Vec2r], ptr [[CP]], i64 0, i64 [[IV]], i32 0 -; CHECK-NEXT: store double [[MUL]], ptr [[X4]], align 8, !tbaa [[TBAA10:![0-9]+]] -; CHECK-NEXT: 
[[Y:%.*]] = getelementptr inbounds [[STRUCT_VEC4R]], ptr [[P]], i64 [[IV]], i32 1 -; CHECK-NEXT: [[TMP20:%.*]] = load double, ptr [[Y]], align 8, !tbaa [[TBAA5]] -; CHECK-NEXT: [[MUL7:%.*]] = fmul double [[TMP20]], 3.000000e+00 -; CHECK-NEXT: [[Y10:%.*]] = getelementptr inbounds [20 x %struct.Vec2r], ptr [[CP]], i64 0, i64 [[IV]], i32 1 -; CHECK-NEXT: store double [[MUL7]], ptr [[Y10]], align 8, !tbaa [[TBAA12:![0-9]+]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 4 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -126,20 +109,20 @@ define void @ir_tbaa_different(ptr %base, ptr %end, ptr %src) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[SRC]], align 4, !alias.scope [[META13:![0-9]+]] +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[SRC]], align 4, !alias.scope [[META10:![0-9]+]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[TMP11]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x float> [[BROADCAST_SPLATINSERT]], <2 x float> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x float>, ptr [[NEXT_GEP]], align 4, !alias.scope [[META16:![0-9]+]], !noalias [[META13]] +; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x float>, ptr [[NEXT_GEP]], align 4, !alias.scope [[META13:![0-9]+]], !noalias [[META10]] ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <4 x float> [[WIDE_VEC]], <4 x float> poison, <2 x i32> <i32 0, i32 2> ; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <4 x float> [[WIDE_VEC]], <4 x float> poison, <2 x i32> <i32 1, i32 3> ; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x float> [[STRIDED_VEC]], [[BROADCAST_SPLAT]] ; 
CHECK-NEXT: [[TMP7:%.*]] = fmul <2 x float> [[STRIDED_VEC3]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> [[TMP7]], <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x float> [[TMP8]], <4 x float> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3> -; CHECK-NEXT: store <4 x float> [[INTERLEAVED_VEC]], ptr [[NEXT_GEP]], align 4, !alias.scope [[META16]], !noalias [[META13]] +; CHECK-NEXT: store <4 x float> [[INTERLEAVED_VEC]], ptr [[NEXT_GEP]], align 4, !alias.scope [[META13]], !noalias [[META10]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -152,9 +135,9 @@ define void @ir_tbaa_different(ptr %base, ptr %end, ptr %src) { ; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr inbounds nuw i8, ptr [[PTR_IV]], i64 8 ; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[PTR_IV]], align 4 ; CHECK-NEXT: [[MUL_1:%.*]] = fmul float [[L_1]], [[L_INVAR]] -; CHECK-NEXT: store float [[MUL_1]], ptr [[PTR_IV]], align 4, !tbaa [[TBAA10]] +; CHECK-NEXT: store float [[MUL_1]], ptr [[PTR_IV]], align 4, !tbaa [[TBAA16:![0-9]+]] ; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR_IV]], i64 4 -; CHECK-NEXT: [[L_2:%.*]] = load float, ptr [[GEP_1]], align 4, !tbaa [[TBAA12]] +; CHECK-NEXT: [[L_2:%.*]] = load float, ptr [[GEP_1]], align 4, !tbaa [[TBAA18:![0-9]+]] ; CHECK-NEXT: [[MUL_2:%.*]] = fmul float [[L_2]], [[L_INVAR]] ; CHECK-NEXT: store float [[MUL_2]], ptr [[GEP_1]], align 4 ; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr 
[[PTR_IV_NEXT]], [[END]] @@ -278,15 +261,15 @@ exit: ; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META8:![0-9]+]], [[META9:![0-9]+]]} ; CHECK: [[META8]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META9]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[TBAA10]] = !{[[META11:![0-9]+]], [[META2]], i64 0} -; CHECK: [[META11]] = !{!"Vec2r", [[META2]], i64 0, [[META2]], i64 8} -; CHECK: [[TBAA12]] = !{[[META11]], [[META2]], i64 8} +; CHECK: [[META10]] = !{[[META11:![0-9]+]]} +; CHECK: [[META11]] = distinct !{[[META11]], [[META12:![0-9]+]]} +; CHECK: [[META12]] = distinct !{[[META12]], !"LVerDomain"} ; CHECK: [[META13]] = !{[[META14:![0-9]+]]} -; CHECK: [[META14]] = distinct !{[[META14]], [[META15:![0-9]+]]} -; CHECK: [[META15]] = distinct !{[[META15]], !"LVerDomain"} -; CHECK: [[META16]] = !{[[META17:![0-9]+]]} -; CHECK: [[META17]] = distinct !{[[META17]], [[META15]]} -; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META8]], [[META9]]} +; CHECK: [[META14]] = distinct !{[[META14]], [[META12]]} +; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META8]], [[META9]]} +; CHECK: [[TBAA16]] = !{[[META17:![0-9]+]], [[META2]], i64 0} +; CHECK: [[META17]] = !{!"Vec2r", [[META2]], i64 0, [[META2]], i64 8} +; CHECK: [[TBAA18]] = !{[[META17]], [[META2]], i64 8} ; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META8]]} ; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META8]], [[META9]]} ; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META9]], [[META8]]} diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll index 4885dd2..b4cad11 100644 --- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll @@ -47,11 +47,7 @@ define void @test_array_load2_store2(i32 %C, i32 %D) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; 
CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_END]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -124,11 +120,7 @@ define void @test_struct_array_load3_store3() { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -206,11 +198,7 @@ define i32 @test_struct_load4(ptr nocapture readonly %S) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[SUB8_LCSSA:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: ret i32 [[SUB8_LCSSA]] @@ -279,13 +267,9 @@ define void @test_struct_store4(ptr noalias nocapture readonly %A, ptr noalias n ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; entry: br label %for.body @@ -365,13 +349,9 @@ define void 
@test_reversed_load2_store2(ptr noalias nocapture readonly %A, ptr n ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_COND_CLEANUP]] ; entry: br label %for.body @@ -619,11 +599,7 @@ define void @load_gap_reverse(ptr noalias nocapture %P1, ptr noalias nocapture % ; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_EXIT]] ; CHECK: for.exit: ; CHECK-NEXT: ret void ; @@ -681,13 +657,9 @@ define void @mixed_load2_store2(ptr noalias nocapture readonly %A, ptr noalias n ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_COND_CLEANUP]] ; entry: br label %for.body @@ -753,13 +725,9 @@ define void @mixed_load3_store3(ptr nocapture %A) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: 
scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; entry: br label %for.body @@ -836,17 +804,13 @@ define void @int_float_struct(ptr nocapture readonly %A) #0 { ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[ADD3_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]]) ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]]) +; CHECK-NEXT: [[ADD3_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]]) ; CHECK-NEXT: store i32 [[ADD_LCSSA]], ptr @SA, align 4 ; CHECK-NEXT: store float [[ADD3_LCSSA]], ptr @SB, align 4 ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/is_fpclass.ll b/llvm/test/Transforms/LoopVectorize/is_fpclass.ll index ab70c14..6c4ee5b7 100644 --- a/llvm/test/Transforms/LoopVectorize/is_fpclass.ll +++ b/llvm/test/Transforms/LoopVectorize/is_fpclass.ll @@ -20,19 +20,7 @@ define void @d() { ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I7:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[I3:%.*]] = load float, ptr null, align 4 -; 
CHECK-NEXT: [[I4:%.*]] = getelementptr float, ptr @d, i64 [[I]] -; CHECK-NEXT: [[I5:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[I3]], i32 0) -; CHECK-NEXT: [[I6:%.*]] = select i1 [[I5]], float 0.000000e+00, float 1.000000e+00 -; CHECK-NEXT: store float [[I6]], ptr [[I4]], align 4 -; CHECK-NEXT: [[I7]] = add i64 [[I]], 1 -; CHECK-NEXT: [[I8:%.*]] = icmp eq i64 [[I7]], 128 -; CHECK-NEXT: br i1 [[I8]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll index e662039..70b1ea1 100644 --- a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll +++ b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll @@ -31,21 +31,8 @@ define i64 @select_decreasing_induction_icmp_const_start(ptr %a) { ; IC1VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP6]], 9223372036854775807 ; IC1VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP6]], i64 331 ; IC1VF4-NEXT: br label %[[EXIT:.*]] -; IC1VF4: [[SCALAR_PH:.*]]: -; IC1VF4-NEXT: br label %[[LOOP:.*]] -; IC1VF4: [[LOOP]]: -; IC1VF4-NEXT: [[IV:%.*]] = phi i64 [ 19999, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; IC1VF4-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8 -; IC1VF4-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3 -; IC1VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]] -; IC1VF4-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; IC1VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0 -; IC1VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC1VF4: [[EXIT]]: -; IC1VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC1VF4-NEXT: ret 
i64 [[SPEC_SELECT_LCSSA]] +; IC1VF4-NEXT: ret i64 [[RDX_SELECT]] ; ; IC4VF4-LABEL: define i64 @select_decreasing_induction_icmp_const_start( ; IC4VF4-SAME: ptr [[A:%.*]]) { @@ -101,21 +88,8 @@ define i64 @select_decreasing_induction_icmp_const_start(ptr %a) { ; IC4VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP18]], 9223372036854775807 ; IC4VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP18]], i64 331 ; IC4VF4-NEXT: br label %[[EXIT:.*]] -; IC4VF4: [[SCALAR_PH:.*]]: -; IC4VF4-NEXT: br label %[[LOOP:.*]] -; IC4VF4: [[LOOP]]: -; IC4VF4-NEXT: [[IV:%.*]] = phi i64 [ 19999, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; IC4VF4-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8 -; IC4VF4-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3 -; IC4VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]] -; IC4VF4-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; IC4VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0 -; IC4VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF4: [[EXIT]]: -; IC4VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF4-NEXT: ret i64 [[SPEC_SELECT_LCSSA]] +; IC4VF4-NEXT: ret i64 [[RDX_SELECT]] ; ; IC4VF1-LABEL: define i64 @select_decreasing_induction_icmp_const_start( ; IC4VF1-SAME: ptr [[A:%.*]]) { @@ -159,21 +133,8 @@ define i64 @select_decreasing_induction_icmp_const_start(ptr %a) { ; IC4VF1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[RDX_MINMAX5]], 9223372036854775807 ; IC4VF1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[RDX_MINMAX5]], i64 331 ; IC4VF1-NEXT: br label %[[EXIT:.*]] -; IC4VF1: [[SCALAR_PH:.*]]: -; IC4VF1-NEXT: br label %[[LOOP:.*]] -; IC4VF1: [[LOOP]]: -; IC4VF1-NEXT: [[IV:%.*]] = phi i64 [ 
19999, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; IC4VF1-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8 -; IC4VF1-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3 -; IC4VF1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]] -; IC4VF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; IC4VF1-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0 -; IC4VF1-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF1: [[EXIT]]: -; IC4VF1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF1-NEXT: ret i64 [[SPEC_SELECT_LCSSA]] +; IC4VF1-NEXT: ret i64 [[RDX_SELECT]] ; entry: br label %loop @@ -227,21 +188,8 @@ define i16 @select_decreasing_induction_icmp_table_i16(i16 noundef %val) { ; IC1VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[TMP7]], 32767 ; IC1VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[TMP7]], i16 0 ; IC1VF4-NEXT: br label %[[EXIT:.*]] -; IC1VF4: [[SCALAR_PH:.*]]: -; IC1VF4-NEXT: br label %[[LOOP:.*]] -; IC1VF4: [[LOOP]]: -; IC1VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]] -; IC1VF4-NEXT: [[LD_TABLE:%.*]] = load i16, ptr [[GEP_TABLE_IV]], align 1 -; IC1VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = icmp ugt i16 [[LD_TABLE]], [[VAL]] -; IC1VF4-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1 -; IC1VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]] -; IC1VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; IC1VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC1VF4: [[EXIT]]: -; 
IC1VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC1VF4-NEXT: ret i16 [[SPEC_SELECT_LCSSA]] +; IC1VF4-NEXT: ret i16 [[RDX_SELECT]] ; ; IC4VF4-LABEL: define i16 @select_decreasing_induction_icmp_table_i16( ; IC4VF4-SAME: i16 noundef [[VAL:%.*]]) { @@ -460,21 +408,8 @@ define i16 @select_decreasing_induction_icmp_table_i16(i16 noundef %val) { ; IC4VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[TMP116]], 32767 ; IC4VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[TMP116]], i16 0 ; IC4VF4-NEXT: br label %[[EXIT:.*]] -; IC4VF4: [[SCALAR_PH:.*]]: -; IC4VF4-NEXT: br label %[[LOOP:.*]] -; IC4VF4: [[LOOP]]: -; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]] -; IC4VF4-NEXT: [[LD_TABLE:%.*]] = load i16, ptr [[GEP_TABLE_IV]], align 1 -; IC4VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = icmp ugt i16 [[LD_TABLE]], [[VAL]] -; IC4VF4-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1 -; IC4VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]] -; IC4VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; IC4VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF4: [[EXIT]]: -; IC4VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF4-NEXT: ret i16 [[SPEC_SELECT_LCSSA]] +; IC4VF4-NEXT: ret i16 [[RDX_SELECT]] ; ; IC4VF1-LABEL: define i16 @select_decreasing_induction_icmp_table_i16( ; IC4VF1-SAME: i16 noundef [[VAL:%.*]]) { @@ -523,21 +458,8 @@ define i16 @select_decreasing_induction_icmp_table_i16(i16 noundef %val) { ; IC4VF1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[RDX_MINMAX5]], 32767 ; IC4VF1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], 
i16 [[RDX_MINMAX5]], i16 0 ; IC4VF1-NEXT: br label %[[EXIT:.*]] -; IC4VF1: [[SCALAR_PH:.*]]: -; IC4VF1-NEXT: br label %[[LOOP:.*]] -; IC4VF1: [[LOOP]]: -; IC4VF1-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]] -; IC4VF1-NEXT: [[LD_TABLE:%.*]] = load i16, ptr [[GEP_TABLE_IV]], align 1 -; IC4VF1-NEXT: [[CMP_TABLE_VAL:%.*]] = icmp ugt i16 [[LD_TABLE]], [[VAL]] -; IC4VF1-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1 -; IC4VF1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]] -; IC4VF1-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; IC4VF1-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF1: [[EXIT]]: -; IC4VF1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF1-NEXT: ret i16 [[SPEC_SELECT_LCSSA]] +; IC4VF1-NEXT: ret i16 [[RDX_SELECT]] ; entry: br label %loop @@ -592,21 +514,8 @@ define i16 @select_decreasing_induction_icmp_table_half(half noundef %val) { ; IC1VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[TMP7]], 32767 ; IC1VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[TMP7]], i16 0 ; IC1VF4-NEXT: br label %[[EXIT:.*]] -; IC1VF4: [[SCALAR_PH:.*]]: -; IC1VF4-NEXT: br label %[[LOOP:.*]] -; IC1VF4: [[LOOP]]: -; IC1VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]] -; IC1VF4-NEXT: [[LD_TABLE:%.*]] = load half, ptr [[GEP_TABLE_IV]], align 1 -; IC1VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = fcmp ugt half [[LD_TABLE]], [[VAL]] -; IC1VF4-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1 -; 
IC1VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]] -; IC1VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; IC1VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC1VF4: [[EXIT]]: -; IC1VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC1VF4-NEXT: ret i16 [[SPEC_SELECT_LCSSA]] +; IC1VF4-NEXT: ret i16 [[RDX_SELECT]] ; ; IC4VF4-LABEL: define i16 @select_decreasing_induction_icmp_table_half( ; IC4VF4-SAME: half noundef [[VAL:%.*]]) { @@ -825,21 +734,8 @@ define i16 @select_decreasing_induction_icmp_table_half(half noundef %val) { ; IC4VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[TMP116]], 32767 ; IC4VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[TMP116]], i16 0 ; IC4VF4-NEXT: br label %[[EXIT:.*]] -; IC4VF4: [[SCALAR_PH:.*]]: -; IC4VF4-NEXT: br label %[[LOOP:.*]] -; IC4VF4: [[LOOP]]: -; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]] -; IC4VF4-NEXT: [[LD_TABLE:%.*]] = load half, ptr [[GEP_TABLE_IV]], align 1 -; IC4VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = fcmp ugt half [[LD_TABLE]], [[VAL]] -; IC4VF4-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1 -; IC4VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]] -; IC4VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; IC4VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF4: [[EXIT]]: -; IC4VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF4-NEXT: ret i16 [[SPEC_SELECT_LCSSA]] +; IC4VF4-NEXT: ret i16 [[RDX_SELECT]] ; ; IC4VF1-LABEL: define i16 @select_decreasing_induction_icmp_table_half( ; IC4VF1-SAME: half noundef 
[[VAL:%.*]]) { @@ -888,21 +784,8 @@ define i16 @select_decreasing_induction_icmp_table_half(half noundef %val) { ; IC4VF1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[RDX_MINMAX5]], 32767 ; IC4VF1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[RDX_MINMAX5]], i16 0 ; IC4VF1-NEXT: br label %[[EXIT:.*]] -; IC4VF1: [[SCALAR_PH:.*]]: -; IC4VF1-NEXT: br label %[[LOOP:.*]] -; IC4VF1: [[LOOP]]: -; IC4VF1-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]] -; IC4VF1-NEXT: [[LD_TABLE:%.*]] = load half, ptr [[GEP_TABLE_IV]], align 1 -; IC4VF1-NEXT: [[CMP_TABLE_VAL:%.*]] = fcmp ugt half [[LD_TABLE]], [[VAL]] -; IC4VF1-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1 -; IC4VF1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]] -; IC4VF1-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; IC4VF1-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF1: [[EXIT]]: -; IC4VF1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF1-NEXT: ret i16 [[SPEC_SELECT_LCSSA]] +; IC4VF1-NEXT: ret i16 [[RDX_SELECT]] ; entry: br label %loop @@ -954,21 +837,8 @@ define i64 @select_decreasing_induction_icmp_iv_unsigned(ptr %a) { ; IC1VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP6]], -1 ; IC1VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP6]], i64 331 ; IC1VF4-NEXT: br label %[[EXIT:.*]] -; IC1VF4: [[SCALAR_PH:.*]]: -; IC1VF4-NEXT: br label %[[LOOP:.*]] -; IC1VF4: [[LOOP]]: -; IC1VF4-NEXT: [[IV:%.*]] = phi i64 [ 9223372036854775807, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[GEP_A_IV:%.*]] = 
getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; IC1VF4-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8 -; IC1VF4-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3 -; IC1VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]] -; IC1VF4-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; IC1VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0 -; IC1VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC1VF4: [[EXIT]]: -; IC1VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC1VF4-NEXT: ret i64 [[SPEC_SELECT_LCSSA]] +; IC1VF4-NEXT: ret i64 [[RDX_SELECT]] ; ; IC4VF4-LABEL: define i64 @select_decreasing_induction_icmp_iv_unsigned( ; IC4VF4-SAME: ptr [[A:%.*]]) { @@ -1024,21 +894,8 @@ define i64 @select_decreasing_induction_icmp_iv_unsigned(ptr %a) { ; IC4VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP18]], -1 ; IC4VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP18]], i64 331 ; IC4VF4-NEXT: br label %[[EXIT:.*]] -; IC4VF4: [[SCALAR_PH:.*]]: -; IC4VF4-NEXT: br label %[[LOOP:.*]] -; IC4VF4: [[LOOP]]: -; IC4VF4-NEXT: [[IV:%.*]] = phi i64 [ 9223372036854775807, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; IC4VF4-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8 -; IC4VF4-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3 -; IC4VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]] -; IC4VF4-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; IC4VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0 -; IC4VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF4: [[EXIT]]: -; IC4VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF4-NEXT: ret i64 
[[SPEC_SELECT_LCSSA]] +; IC4VF4-NEXT: ret i64 [[RDX_SELECT]] ; ; IC4VF1-LABEL: define i64 @select_decreasing_induction_icmp_iv_unsigned( ; IC4VF1-SAME: ptr [[A:%.*]]) { @@ -1082,21 +939,8 @@ define i64 @select_decreasing_induction_icmp_iv_unsigned(ptr %a) { ; IC4VF1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[RDX_MINMAX5]], -1 ; IC4VF1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[RDX_MINMAX5]], i64 331 ; IC4VF1-NEXT: br label %[[EXIT:.*]] -; IC4VF1: [[SCALAR_PH:.*]]: -; IC4VF1-NEXT: br label %[[LOOP:.*]] -; IC4VF1: [[LOOP]]: -; IC4VF1-NEXT: [[IV:%.*]] = phi i64 [ 9223372036854775807, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; IC4VF1-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8 -; IC4VF1-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3 -; IC4VF1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]] -; IC4VF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; IC4VF1-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0 -; IC4VF1-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF1: [[EXIT]]: -; IC4VF1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF1-NEXT: ret i64 [[SPEC_SELECT_LCSSA]] +; IC4VF1-NEXT: ret i64 [[RDX_SELECT]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-trunc.ll b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-trunc.ll index 0ace547..b991d58 100644 --- a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-trunc.ll +++ b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-trunc.ll @@ -261,22 +261,8 @@ define i32 @select_icmp_const_truncated_iv_const_exit(ptr %a) { ; CHECK-VF4IC1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP6]], -2147483648 ; CHECK-VF4IC1-NEXT: [[RDX_SELECT:%.*]] = select i1 
[[RDX_SELECT_CMP]], i32 [[TMP6]], i32 331 ; CHECK-VF4IC1-NEXT: br label %[[EXIT:.*]] -; CHECK-VF4IC1: [[SCALAR_PH:.*]]: -; CHECK-VF4IC1-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF4IC1: [[FOR_BODY]]: -; CHECK-VF4IC1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC1-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-VF4IC1-NEXT: [[TMP7:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-VF4IC1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP7]], 3 -; CHECK-VF4IC1-NEXT: [[TMP8:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF4IC1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP8]], i32 [[RDX]] -; CHECK-VF4IC1-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF4IC1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 20000 -; CHECK-VF4IC1-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF4IC1: [[EXIT]]: -; CHECK-VF4IC1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF4IC1-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF4IC1-NEXT: ret i32 [[RDX_SELECT]] ; ; CHECK-VF4IC4-LABEL: define i32 @select_icmp_const_truncated_iv_const_exit( ; CHECK-VF4IC4-SAME: ptr [[A:%.*]]) { @@ -322,22 +308,8 @@ define i32 @select_icmp_const_truncated_iv_const_exit(ptr %a) { ; CHECK-VF4IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP15]], -2147483648 ; CHECK-VF4IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP15]], i32 331 ; CHECK-VF4IC4-NEXT: br label %[[EXIT:.*]] -; CHECK-VF4IC4: [[SCALAR_PH:.*]]: -; CHECK-VF4IC4-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF4IC4: [[FOR_BODY]]: -; CHECK-VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC4-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; 
CHECK-VF4IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-VF4IC4-NEXT: [[TMP16:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-VF4IC4-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP16]], 3 -; CHECK-VF4IC4-NEXT: [[TMP17:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF4IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP17]], i32 [[RDX]] -; CHECK-VF4IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF4IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 20000 -; CHECK-VF4IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF4IC4: [[EXIT]]: -; CHECK-VF4IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF4IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF4IC4-NEXT: ret i32 [[RDX_SELECT]] ; ; CHECK-VF1IC4-LABEL: define i32 @select_icmp_const_truncated_iv_const_exit( ; CHECK-VF1IC4-SAME: ptr [[A:%.*]]) { @@ -384,22 +356,8 @@ define i32 @select_icmp_const_truncated_iv_const_exit(ptr %a) { ; CHECK-VF1IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[RDX_MINMAX5]], -2147483648 ; CHECK-VF1IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[RDX_MINMAX5]], i32 331 ; CHECK-VF1IC4-NEXT: br label %[[EXIT:.*]] -; CHECK-VF1IC4: [[SCALAR_PH:.*]]: -; CHECK-VF1IC4-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF1IC4: [[FOR_BODY]]: -; CHECK-VF1IC4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF1IC4-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF1IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-VF1IC4-NEXT: [[TMP26:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-VF1IC4-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP26]], 3 -; CHECK-VF1IC4-NEXT: [[TMP27:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF1IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP27]], i32 [[RDX]] -; CHECK-VF1IC4-NEXT: [[INC]] = 
add nuw nsw i64 [[IV]], 1 -; CHECK-VF1IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 20000 -; CHECK-VF1IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF1IC4: [[EXIT]]: -; CHECK-VF1IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF1IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF1IC4-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body @@ -446,22 +404,8 @@ define i32 @select_fcmp_max_valid_const_ub(ptr %a) { ; CHECK-VF4IC1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP6]], -2147483648 ; CHECK-VF4IC1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP6]], i32 -1 ; CHECK-VF4IC1-NEXT: br label %[[EXIT:.*]] -; CHECK-VF4IC1: [[SCALAR_PH:.*]]: -; CHECK-VF4IC1-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF4IC1: [[FOR_BODY]]: -; CHECK-VF4IC1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC1-NEXT: [[RDX:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-VF4IC1-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-VF4IC1-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP7]], 0.000000e+00 -; CHECK-VF4IC1-NEXT: [[TMP8:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF4IC1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP8]], i32 [[RDX]] -; CHECK-VF4IC1-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF4IC1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 2147483648 -; CHECK-VF4IC1-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF4IC1: [[EXIT]]: -; CHECK-VF4IC1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF4IC1-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF4IC1-NEXT: ret i32 [[RDX_SELECT]] ; ; CHECK-VF4IC4-LABEL: define i32 
@select_fcmp_max_valid_const_ub( ; CHECK-VF4IC4-SAME: ptr [[A:%.*]]) { @@ -507,22 +451,8 @@ define i32 @select_fcmp_max_valid_const_ub(ptr %a) { ; CHECK-VF4IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP15]], -2147483648 ; CHECK-VF4IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP15]], i32 -1 ; CHECK-VF4IC4-NEXT: br label %[[EXIT:.*]] -; CHECK-VF4IC4: [[SCALAR_PH:.*]]: -; CHECK-VF4IC4-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF4IC4: [[FOR_BODY]]: -; CHECK-VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC4-NEXT: [[RDX:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-VF4IC4-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-VF4IC4-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP16]], 0.000000e+00 -; CHECK-VF4IC4-NEXT: [[TMP17:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF4IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP17]], i32 [[RDX]] -; CHECK-VF4IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF4IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 2147483648 -; CHECK-VF4IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF4IC4: [[EXIT]]: -; CHECK-VF4IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF4IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF4IC4-NEXT: ret i32 [[RDX_SELECT]] ; ; CHECK-VF1IC4-LABEL: define i32 @select_fcmp_max_valid_const_ub( ; CHECK-VF1IC4-SAME: ptr [[A:%.*]]) { @@ -569,22 +499,8 @@ define i32 @select_fcmp_max_valid_const_ub(ptr %a) { ; CHECK-VF1IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[RDX_MINMAX5]], -2147483648 ; CHECK-VF1IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[RDX_MINMAX5]], i32 -1 ; CHECK-VF1IC4-NEXT: br label %[[EXIT:.*]] -; CHECK-VF1IC4: [[SCALAR_PH:.*]]: -; 
CHECK-VF1IC4-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF1IC4: [[FOR_BODY]]: -; CHECK-VF1IC4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF1IC4-NEXT: [[RDX:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF1IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-VF1IC4-NEXT: [[TMP26:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-VF1IC4-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP26]], 0.000000e+00 -; CHECK-VF1IC4-NEXT: [[TMP27:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF1IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP27]], i32 [[RDX]] -; CHECK-VF1IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF1IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 2147483648 -; CHECK-VF1IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF1IC4: [[EXIT]]: -; CHECK-VF1IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF1IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF1IC4-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body @@ -636,22 +552,8 @@ define i32 @select_icmp_truncated_unsigned_iv_range(ptr %a) { ; CHECK-VF4IC1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP5]], 0 ; CHECK-VF4IC1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP5]], i32 331 ; CHECK-VF4IC1-NEXT: br label %[[EXIT:.*]] -; CHECK-VF4IC1: [[SCALAR_PH:.*]]: -; CHECK-VF4IC1-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF4IC1: [[FOR_BODY]]: -; CHECK-VF4IC1-NEXT: [[IV1:%.*]] = phi i64 [ 2147483646, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC1-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] -; CHECK-VF4IC1-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4 -; CHECK-VF4IC1-NEXT: [[CMP:%.*]] = 
icmp sgt i32 [[TMP0]], 3 -; CHECK-VF4IC1-NEXT: [[CONV:%.*]] = trunc i64 [[IV1]] to i32 -; CHECK-VF4IC1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[CONV]], i32 [[RDX]] -; CHECK-VF4IC1-NEXT: [[INC]] = add nuw nsw i64 [[IV1]], 1 -; CHECK-VF4IC1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 4294967294 -; CHECK-VF4IC1-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF4IC1: [[EXIT]]: -; CHECK-VF4IC1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF4IC1-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF4IC1-NEXT: ret i32 [[RDX_SELECT]] ; ; CHECK-VF4IC4-LABEL: define i32 @select_icmp_truncated_unsigned_iv_range( ; CHECK-VF4IC4-SAME: ptr [[A:%.*]]) { @@ -698,22 +600,8 @@ define i32 @select_icmp_truncated_unsigned_iv_range(ptr %a) { ; CHECK-VF4IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP14]], 0 ; CHECK-VF4IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP14]], i32 331 ; CHECK-VF4IC4-NEXT: br label %[[EXIT:.*]] -; CHECK-VF4IC4: [[SCALAR_PH:.*]]: -; CHECK-VF4IC4-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF4IC4: [[FOR_BODY]]: -; CHECK-VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ 2147483646, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC4-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-VF4IC4-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-VF4IC4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP15]], 3 -; CHECK-VF4IC4-NEXT: [[CONV:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF4IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[CONV]], i32 [[RDX]] -; CHECK-VF4IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF4IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 4294967294 -; CHECK-VF4IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF4IC4: [[EXIT]]: 
-; CHECK-VF4IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF4IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF4IC4-NEXT: ret i32 [[RDX_SELECT]] ; ; CHECK-VF1IC4-LABEL: define i32 @select_icmp_truncated_unsigned_iv_range( ; CHECK-VF1IC4-SAME: ptr [[A:%.*]]) { @@ -762,22 +650,8 @@ define i32 @select_icmp_truncated_unsigned_iv_range(ptr %a) { ; CHECK-VF1IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[RDX_MINMAX6]], 0 ; CHECK-VF1IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[RDX_MINMAX6]], i32 331 ; CHECK-VF1IC4-NEXT: br label %[[EXIT:.*]] -; CHECK-VF1IC4: [[SCALAR_PH:.*]]: -; CHECK-VF1IC4-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF1IC4: [[FOR_BODY]]: -; CHECK-VF1IC4-NEXT: [[IV:%.*]] = phi i64 [ 2147483646, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF1IC4-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF1IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-VF1IC4-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-VF1IC4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP24]], 3 -; CHECK-VF1IC4-NEXT: [[CONV:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF1IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[CONV]], i32 [[RDX]] -; CHECK-VF1IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF1IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 4294967294 -; CHECK-VF1IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF1IC4: [[EXIT]]: -; CHECK-VF1IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF1IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF1IC4-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/iv_outside_user.ll b/llvm/test/Transforms/LoopVectorize/iv_outside_user.ll index 3f91baa..86515eb 
100644 --- a/llvm/test/Transforms/LoopVectorize/iv_outside_user.ll +++ b/llvm/test/Transforms/LoopVectorize/iv_outside_user.ll @@ -102,16 +102,8 @@ define i32 @constpre() { ; CHECK-NEXT: br i1 [[TMP0]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INC_PHI:%.*]] = phi i32 [ 32, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[INC]] = sub nsw i32 [[INC_PHI]], 2 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[INC]], 0 -; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[INC_PHI_LCSSA:%.*]] = phi i32 [ [[INC_PHI]], %[[FOR_BODY]] ], [ 2, %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[INC_PHI_LCSSA]] +; CHECK-NEXT: ret i32 2 ; entry: br label %for.body @@ -142,18 +134,8 @@ define ptr @geppre(ptr %ptr) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[IND_ESCAPE:%.*]] = getelementptr i8, ptr [[TMP0]], i64 -16 ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INC_PHI:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[PTR_PHI:%.*]] = phi ptr [ [[PTR]], %[[SCALAR_PH]] ], [ [[INC_PTR:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[INC]] = add nsw i32 [[INC_PHI]], 1 -; CHECK-NEXT: [[INC_PTR]] = getelementptr i32, ptr [[PTR_PHI]], i32 4 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[INC]], 32 -; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[PTR_PHI_LCSSA:%.*]] = phi ptr [ [[PTR_PHI]], %[[FOR_BODY]] ], [ [[IND_ESCAPE]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret ptr [[PTR_PHI_LCSSA]] +; CHECK-NEXT: ret ptr [[IND_ESCAPE]] ; entry: br label %for.body @@ -411,18 +393,8 @@ define i64 @iv_scalar_steps_and_outside_users(ptr %ptr) { ; VEC-NEXT: br 
i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; VEC: [[MIDDLE_BLOCK]]: ; VEC-NEXT: br label %[[EXIT:.*]] -; VEC: [[SCALAR_PH:.*]]: -; VEC-NEXT: br label %[[LOOP:.*]] -; VEC: [[LOOP]]: -; VEC-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VEC-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1 -; VEC-NEXT: [[GEP_PTR:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[IV]] -; VEC-NEXT: store i64 [[IV]], ptr [[GEP_PTR]], align 4 -; VEC-NEXT: [[EXITCOND:%.*]] = icmp ugt i64 [[IV]], 1000 -; VEC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; VEC: [[EXIT]]: -; VEC-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ], [ 1001, %[[MIDDLE_BLOCK]] ] -; VEC-NEXT: ret i64 [[IV_LCSSA]] +; VEC-NEXT: ret i64 1001 ; ; INTERLEAVE-LABEL: define i64 @iv_scalar_steps_and_outside_users( ; INTERLEAVE-SAME: ptr [[PTR:%.*]]) { @@ -442,18 +414,8 @@ define i64 @iv_scalar_steps_and_outside_users(ptr %ptr) { ; INTERLEAVE-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1 -; INTERLEAVE-NEXT: [[GEP_PTR:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[IV]] -; INTERLEAVE-NEXT: store i64 [[IV]], ptr [[GEP_PTR]], align 4 -; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp ugt i64 [[IV]], 1000 -; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; INTERLEAVE: [[EXIT]]: -; INTERLEAVE-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ], [ 1001, %[[MIDDLE_BLOCK]] ] -; INTERLEAVE-NEXT: ret i64 [[IV_LCSSA]] +; INTERLEAVE-NEXT: ret i64 1001 ; entry: br label %loop @@ -491,20 +453,8 @@ define i32 @iv_2_dead_in_loop_only_used_outside(ptr 
%ptr) { ; VEC-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; VEC: [[MIDDLE_BLOCK]]: ; VEC-NEXT: br label %[[EXIT:.*]] -; VEC: [[SCALAR_PH:.*]]: -; VEC-NEXT: br label %[[LOOP:.*]] -; VEC: [[LOOP]]: -; VEC-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VEC-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ] -; VEC-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1 -; VEC-NEXT: [[IV_2_NEXT]] = add nuw i32 [[IV_2]], 2 -; VEC-NEXT: [[GEP_PTR:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[IV]] -; VEC-NEXT: store i64 [[IV]], ptr [[GEP_PTR]], align 4 -; VEC-NEXT: [[EXITCOND:%.*]] = icmp ugt i64 [[IV]], 1000 -; VEC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; VEC: [[EXIT]]: -; VEC-NEXT: [[IV_2_LCSSA:%.*]] = phi i32 [ [[IV_2]], %[[LOOP]] ], [ 2002, %[[MIDDLE_BLOCK]] ] -; VEC-NEXT: ret i32 [[IV_2_LCSSA]] +; VEC-NEXT: ret i32 2002 ; ; INTERLEAVE-LABEL: define i32 @iv_2_dead_in_loop_only_used_outside( ; INTERLEAVE-SAME: ptr [[PTR:%.*]]) { @@ -524,20 +474,8 @@ define i32 @iv_2_dead_in_loop_only_used_outside(ptr %ptr) { ; INTERLEAVE-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1 -; INTERLEAVE-NEXT: [[IV_2_NEXT]] = add nuw i32 [[IV_2]], 2 -; INTERLEAVE-NEXT: [[GEP_PTR:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[IV]] -; INTERLEAVE-NEXT: store i64 [[IV]], ptr [[GEP_PTR]], align 4 -; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp ugt i64 [[IV]], 1000 -; INTERLEAVE-NEXT: br i1 
[[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; INTERLEAVE: [[EXIT]]: -; INTERLEAVE-NEXT: [[IV_2_LCSSA:%.*]] = phi i32 [ [[IV_2]], %[[LOOP]] ], [ 2002, %[[MIDDLE_BLOCK]] ] -; INTERLEAVE-NEXT: ret i32 [[IV_2_LCSSA]] +; INTERLEAVE-NEXT: ret i32 2002 ; entry: br label %loop @@ -1092,18 +1030,8 @@ define i32 @test_iv_uniform_with_outside_use_scev_simplification(ptr %dst) { ; VEC-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; VEC: [[MIDDLE_BLOCK]]: ; VEC-NEXT: br label %[[E_EXIT:.*]] -; VEC: [[SCALAR_PH:.*]]: -; VEC-NEXT: br label %[[LOOP:.*]] -; VEC: [[LOOP]]: -; VEC-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VEC-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i32 [[IV]] -; VEC-NEXT: store i16 0, ptr [[GEP_DST]], align 2 -; VEC-NEXT: [[IV_NEXT]] = add i32 [[STEP_2]], [[IV]] -; VEC-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[IV_NEXT]], 8 -; VEC-NEXT: br i1 [[CMP_I]], label %[[LOOP]], label %[[E_EXIT]] ; VEC: [[E_EXIT]]: -; VEC-NEXT: [[RES:%.*]] = phi i32 [ [[IV_NEXT]], %[[LOOP]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] -; VEC-NEXT: ret i32 [[RES]] +; VEC-NEXT: ret i32 [[TMP5]] ; ; INTERLEAVE-LABEL: define i32 @test_iv_uniform_with_outside_use_scev_simplification( ; INTERLEAVE-SAME: ptr [[DST:%.*]]) { @@ -1126,18 +1054,8 @@ define i32 @test_iv_uniform_with_outside_use_scev_simplification(ptr %dst) { ; INTERLEAVE-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[E_EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i32 [[IV]] -; INTERLEAVE-NEXT: store i16 0, ptr [[GEP_DST]], align 2 -; INTERLEAVE-NEXT: [[IV_NEXT]] = add i32 [[STEP_2]], 
[[IV]] -; INTERLEAVE-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[IV_NEXT]], 8 -; INTERLEAVE-NEXT: br i1 [[CMP_I]], label %[[LOOP]], label %[[E_EXIT]] ; INTERLEAVE: [[E_EXIT]]: -; INTERLEAVE-NEXT: [[RES:%.*]] = phi i32 [ [[IV_NEXT]], %[[LOOP]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] -; INTERLEAVE-NEXT: ret i32 [[RES]] +; INTERLEAVE-NEXT: ret i32 [[TMP5]] ; entry: %step.1 = sext i8 0 to i32 @@ -1187,19 +1105,8 @@ define i32 @test_iv_uniform_with_outside_use_scev_simplification_2(ptr %dst) { ; VEC: [[MIDDLE_BLOCK]]: ; VEC-NEXT: [[TMP7:%.*]] = extractelement <2 x i32> [[TMP5]], i32 1 ; VEC-NEXT: br label %[[E_EXIT:.*]] -; VEC: [[SCALAR_PH:.*]]: -; VEC-NEXT: br label %[[LOOP:.*]] -; VEC: [[LOOP]]: -; VEC-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VEC-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i32 [[IV]] -; VEC-NEXT: store i16 0, ptr [[GEP_DST]], align 2 -; VEC-NEXT: [[INC:%.*]] = add i32 [[IV]], 1 -; VEC-NEXT: [[IV_NEXT]] = add i32 [[STEP_2]], [[INC]] -; VEC-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[IV_NEXT]], 8 -; VEC-NEXT: br i1 [[CMP_I]], label %[[LOOP]], label %[[E_EXIT]] ; VEC: [[E_EXIT]]: -; VEC-NEXT: [[RES:%.*]] = phi i32 [ [[IV_NEXT]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] -; VEC-NEXT: ret i32 [[RES]] +; VEC-NEXT: ret i32 [[TMP7]] ; ; INTERLEAVE-LABEL: define i32 @test_iv_uniform_with_outside_use_scev_simplification_2( ; INTERLEAVE-SAME: ptr [[DST:%.*]]) { @@ -1224,19 +1131,8 @@ define i32 @test_iv_uniform_with_outside_use_scev_simplification_2(ptr %dst) { ; INTERLEAVE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[E_EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i32 
[[IV]] -; INTERLEAVE-NEXT: store i16 0, ptr [[GEP_DST]], align 2 -; INTERLEAVE-NEXT: [[INC:%.*]] = add i32 [[IV]], 1 -; INTERLEAVE-NEXT: [[IV_NEXT]] = add i32 [[STEP_2]], [[INC]] -; INTERLEAVE-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[IV_NEXT]], 8 -; INTERLEAVE-NEXT: br i1 [[CMP_I]], label %[[LOOP]], label %[[E_EXIT]] ; INTERLEAVE: [[E_EXIT]]: -; INTERLEAVE-NEXT: [[RES:%.*]] = phi i32 [ [[IV_NEXT]], %[[LOOP]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] -; INTERLEAVE-NEXT: ret i32 [[RES]] +; INTERLEAVE-NEXT: ret i32 [[TMP5]] ; entry: %step.1 = sext i8 0 to i32 @@ -1356,24 +1252,12 @@ define i64 @test_iv_increment_incremented(ptr %dst) { ; VEC-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr [[TMP1]], i32 -1 ; VEC-NEXT: store <2 x i16> splat (i16 1), ptr [[TMP2]], align 2 ; VEC-NEXT: [[TMP5:%.*]] = add i64 1, -1 -; VEC-NEXT: [[TMP6:%.*]] = add i64 [[TMP5]], 1 +; VEC-NEXT: [[IV_1_NEXT_LCSSA1:%.*]] = add i64 [[TMP5]], 1 ; VEC-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VEC: [[MIDDLE_BLOCK]]: ; VEC-NEXT: br label %[[EXIT:.*]] -; VEC: [[SCALAR_PH:.*]]: -; VEC-NEXT: br label %[[LOOP:.*]] -; VEC: [[LOOP]]: -; VEC-NEXT: [[IV_1:%.*]] = phi i64 [ 3, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ] -; VEC-NEXT: [[IV_2:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ] -; VEC-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV_1]] -; VEC-NEXT: store i16 1, ptr [[GEP]], align 2 -; VEC-NEXT: [[IV_2_NEXT]] = add i64 [[IV_2]], -1 -; VEC-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_2_NEXT]], 0 -; VEC-NEXT: [[IV_1_NEXT]] = add i64 [[IV_2_NEXT]], 1 -; VEC-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; VEC: [[EXIT]]: -; VEC-NEXT: [[IV_1_NEXT_LCSSA:%.*]] = phi i64 [ [[IV_1_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; VEC-NEXT: ret i64 [[IV_1_NEXT_LCSSA]] +; VEC-NEXT: ret i64 [[IV_1_NEXT_LCSSA1]] ; ; INTERLEAVE-LABEL: define i64 @test_iv_increment_incremented( ; INTERLEAVE-SAME: ptr [[DST:%.*]]) { @@ -1387,24 +1271,12 @@ define i64 @test_iv_increment_incremented(ptr 
%dst) { ; INTERLEAVE-NEXT: store i16 1, ptr [[TMP0]], align 2 ; INTERLEAVE-NEXT: store i16 1, ptr [[TMP1]], align 2 ; INTERLEAVE-NEXT: [[TMP2:%.*]] = add i64 1, -1 -; INTERLEAVE-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], 1 +; INTERLEAVE-NEXT: [[IV_1_NEXT_LCSSA1:%.*]] = add i64 [[TMP2]], 1 ; INTERLEAVE-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV_1:%.*]] = phi i64 [ 3, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[IV_2:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV_1]] -; INTERLEAVE-NEXT: store i16 1, ptr [[GEP]], align 2 -; INTERLEAVE-NEXT: [[IV_2_NEXT]] = add i64 [[IV_2]], -1 -; INTERLEAVE-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_2_NEXT]], 0 -; INTERLEAVE-NEXT: [[IV_1_NEXT]] = add i64 [[IV_2_NEXT]], 1 -; INTERLEAVE-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; INTERLEAVE: [[EXIT]]: -; INTERLEAVE-NEXT: [[IV_1_NEXT_LCSSA:%.*]] = phi i64 [ [[IV_1_NEXT]], %[[LOOP]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ] -; INTERLEAVE-NEXT: ret i64 [[IV_1_NEXT_LCSSA]] +; INTERLEAVE-NEXT: ret i64 [[IV_1_NEXT_LCSSA1]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll b/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll index 11d48df..9358fd9 100644 --- a/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll +++ b/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll @@ -48,29 +48,9 @@ define i16 @test_access_size_not_multiple_of_align(i64 %len, ptr %test_base) { ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP17:%.*]] = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> [[TMP15]]) -; CHECK-NEXT: br label 
[[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[L_T:%.*]] = load i8, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp sge i8 [[L_T]], 0 -; CHECK-NEXT: br i1 [[CMP]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i16, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i16, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i16 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i16 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp eq i64 [[IV]], 4095 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i16 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i16 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i16 [[TMP17]] ; entry: %alloca = alloca [163840 x i16], align 4 @@ -142,29 +122,9 @@ define i32 @test_access_size_multiple_of_align_but_offset_by_1(i64 %len, ptr %te ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[TMP15]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ 
[[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[L_T:%.*]] = load i8, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp sge i8 [[L_T]], 0 -; CHECK-NEXT: br i1 [[CMP]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[START]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp eq i64 [[IV]], 4095 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP17]] ; entry: %alloca = alloca [163840 x i32], align 4 @@ -370,26 +330,7 @@ define void @test_rev_loops_deref_loads(ptr nocapture noundef writeonly %dest) { ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1023, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_CMP]], i64 0, i64 [[IV]] -; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP3_NOT:%.*]] = icmp eq i32 [[TMP19]], 3 -; CHECK-NEXT: br i1 [[CMP3_NOT]], label [[FOR_INC]], label [[IF_THEN:%.*]] -; CHECK: if.then: -; CHECK-NEXT: 
[[ARRAYIDX5:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[IV]] -; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4 -; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP20]], 2 -; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[IV]] -; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX7]], align 4 -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; CHECK-NEXT: [[CMP2_NOT:%.*]] = icmp eq i64 [[IV]], 0 -; CHECK-NEXT: br i1 [[CMP2_NOT]], label [[EXIT]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: exit: ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DEST:%.*]], ptr [[LOCAL_DEST]], i64 1024, i1 false) ; CHECK-NEXT: ret void @@ -481,27 +422,7 @@ define void @test_rev_loops_non_deref_loads(ptr nocapture noundef writeonly %des ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1023, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-NEXT: [[OFF:%.*]] = add i64 [[IV]], -1 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_CMP]], i64 0, i64 [[OFF]] -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP3_NOT:%.*]] = icmp eq i32 [[TMP22]], 3 -; CHECK-NEXT: br i1 [[CMP3_NOT]], label [[FOR_INC]], label [[IF_THEN:%.*]] -; CHECK: if.then: -; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[OFF]] -; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4 -; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP23]], 2 -; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [1024 x 
i32], ptr [[LOCAL_DEST]], i64 0, i64 [[OFF]] -; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX7]], align 4 -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; CHECK-NEXT: [[CMP2_NOT:%.*]] = icmp eq i64 [[IV]], 0 -; CHECK-NEXT: br i1 [[CMP2_NOT]], label [[EXIT]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: exit: ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DEST:%.*]], ptr [[LOCAL_DEST]], i64 1024, i1 false) ; CHECK-NEXT: ret void @@ -574,30 +495,9 @@ define i16 @test_strided_access(i64 %len, ptr %test_base) { ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP15:%.*]] = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> [[TMP13]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[L_T:%.*]] = load i8, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp sge i8 [[L_T]], 0 -; CHECK-NEXT: br i1 [[CMP]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[IV_STRIDE:%.*]] = mul i64 [[IV]], 2 -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i16, ptr [[ALLOCA]], i64 [[IV_STRIDE]] -; CHECK-NEXT: [[VAL:%.*]] = load i16, ptr [[ADDR]], align 2 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i16 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i16 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp eq i64 [[IV]], 4095 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; 
CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i16 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i16 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i16 [[TMP15]] ; entry: %alloca = alloca [163840 x i16], align 4 @@ -681,27 +581,7 @@ define void @test_rev_loops_strided_deref_loads(ptr nocapture noundef writeonly ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512 ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 511, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_CMP]], i64 0, i64 [[IV]] -; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP3_NOT:%.*]] = icmp eq i32 [[TMP21]], 3 -; CHECK-NEXT: br i1 [[CMP3_NOT]], label [[FOR_INC]], label [[IF_THEN:%.*]] -; CHECK: if.then: -; CHECK-NEXT: [[IV_STRIDED:%.*]] = mul i64 [[IV]], 2 -; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[IV_STRIDED]] -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4 -; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP22]], 2 -; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[IV]] -; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX7]], align 4 -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; CHECK-NEXT: [[CMP2_NOT:%.*]] = icmp eq i64 [[IV]], 0 -; CHECK-NEXT: br i1 [[CMP2_NOT]], label [[EXIT]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: exit: ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DEST:%.*]], ptr [[LOCAL_DEST]], i64 1024, i1 false) ; 
CHECK-NEXT: ret void diff --git a/llvm/test/Transforms/LoopVectorize/load-deref-pred-neg-off.ll b/llvm/test/Transforms/LoopVectorize/load-deref-pred-neg-off.ll index b224534..b14a1cd 100644 --- a/llvm/test/Transforms/LoopVectorize/load-deref-pred-neg-off.ll +++ b/llvm/test/Transforms/LoopVectorize/load-deref-pred-neg-off.ll @@ -52,28 +52,9 @@ define i8 @test_negative_off(i16 %len, ptr %test_base) { ; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP20:%.*]] = call i8 @llvm.vector.reduce.add.v2i8(<2 x i8> [[TMP18]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ -1000, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i8 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i16 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr i8, ptr [[ALLOCA]], i16 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr [[ADDR]], align 1 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i8 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i8 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i16 [[IV]], -990 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i8 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP20]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i8 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i8 [[TMP20]] ; entry: %alloca = alloca [64638 x i8] diff --git 
a/llvm/test/Transforms/LoopVectorize/load-of-struct-deref-pred.ll b/llvm/test/Transforms/LoopVectorize/load-of-struct-deref-pred.ll index f44fc4e..096a0a8 100644 --- a/llvm/test/Transforms/LoopVectorize/load-of-struct-deref-pred.ll +++ b/llvm/test/Transforms/LoopVectorize/load-of-struct-deref-pred.ll @@ -30,28 +30,6 @@ define void @accesses_to_struct_dereferenceable(ptr noalias %dst) { ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[GEP_DST]], align 4 -; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i32 [[D]], 0 -; CHECK-NEXT: br i1 [[CMP3]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] -; CHECK: if.then: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @foo, i64 0, i32 0, i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: if.else: -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @foo, i64 0, i32 1, i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[TMP_0:%.*]] = phi i32 [ [[L_A]], [[IF_THEN]] ], [ [[L_B]], [[IF_ELSE]] ] -; CHECK-NEXT: store i32 [[TMP_0]], ptr [[GEP_DST]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 32000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP_HEADER]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -265,29 +243,6 @@ define void @accesses_to_struct_may_not_be_dereferenceable_access_size(ptr noali ; 
CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[GEP_DST]], align 4 -; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i32 [[D]], 0 -; CHECK-NEXT: br i1 [[CMP3]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] -; CHECK: if.then: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @foo, i64 0, i32 0, i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: if.else: -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @foo, i64 0, i32 1, i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i64, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[T:%.*]] = trunc i64 [[L_B]] to i32 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[TMP_0:%.*]] = phi i32 [ [[L_A]], [[IF_THEN]] ], [ [[T]], [[IF_ELSE]] ] -; CHECK-NEXT: store i32 [[TMP_0]], ptr [[GEP_DST]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 32000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP_HEADER]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/loop-form.ll b/llvm/test/Transforms/LoopVectorize/loop-form.ll index c589c77..aed1e29 100644 --- a/llvm/test/Transforms/LoopVectorize/loop-form.ll +++ b/llvm/test/Transforms/LoopVectorize/loop-form.ll @@ -79,17 +79,7 @@ define void @bottom_tested(ptr %p, i32 %n) { ; TAILFOLD-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; TAILFOLD-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; TAILFOLD: middle.block: -; TAILFOLD-NEXT: br label [[IF_END:%.*]] -; TAILFOLD: scalar.ph: ; TAILFOLD-NEXT: br label [[FOR_COND:%.*]] -; TAILFOLD: for.cond: -; TAILFOLD-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_COND]] ] -; TAILFOLD-NEXT: [[IPROM:%.*]] = sext i32 [[I]] to i64 -; TAILFOLD-NEXT: [[B:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IPROM]] -; TAILFOLD-NEXT: store i16 0, ptr [[B]], align 4 -; TAILFOLD-NEXT: [[INC]] = add nsw i32 [[I]], 1 -; TAILFOLD-NEXT: [[CMP:%.*]] = icmp slt i32 [[I]], [[N]] -; TAILFOLD-NEXT: br i1 [[CMP]], label [[FOR_COND]], label [[IF_END]] ; TAILFOLD: if.end: ; TAILFOLD-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/make-followup-loop-id.ll b/llvm/test/Transforms/LoopVectorize/make-followup-loop-id.ll index 781980d..1fe802f 100644 --- a/llvm/test/Transforms/LoopVectorize/make-followup-loop-id.ll +++ b/llvm/test/Transforms/LoopVectorize/make-followup-loop-id.ll @@ -32,17 +32,6 @@ define void @scalar_loop_dead(ptr noundef captures(none) %a, float noundef %x) { ; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[MUL:%.*]] = fmul float [[X]], [[LOAD]] -; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COMP:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[COMP]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -83,7 
+72,7 @@ define void @scalar_loop_live(ptr noundef captures(none) %a, float noundef %x, i ; CHECK-NEXT: store <4 x float> [[TMP1]], ptr [[TMP0]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -98,7 +87,7 @@ define void @scalar_loop_live(ptr noundef captures(none) %a, float noundef %x, i ; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[COMP:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[COMP]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[COMP]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -128,9 +117,6 @@ exit: ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized"} ; CHECK: [[META2]] = !{!"llvm.loop.unroll.count", i32 8} ; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META5:![0-9]+]], [[META6:![0-9]+]]} -; CHECK: [[META5]] = !{!"llvm.loop.vectorize.enable", i1 true} -; CHECK: [[META6]] = !{!"llvm.loop.vectorize.followup_all", [[META1]], [[META2]]} -; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]], [[META3]]} -; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} +; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]], [[META3]]} +; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll b/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll index bb51992..30ee480 100644 --- a/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll +++ b/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll @@ -69,19 +69,7 @@ define void @maxvf3() { ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[J:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[J_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[AJ:%.*]] = getelementptr inbounds [18 x i8], ptr @a, i32 0, i32 [[J]] -; CHECK-NEXT: store i8 69, ptr [[AJ]], align 8 -; CHECK-NEXT: [[JP3:%.*]] = add nuw nsw i32 3, [[J]] -; CHECK-NEXT: [[AJP3:%.*]] = getelementptr inbounds [18 x i8], ptr @a, i32 0, i32 [[JP3]] -; CHECK-NEXT: store i8 7, ptr [[AJP3]], align 8 -; CHECK-NEXT: [[J_NEXT]] = add nuw nsw i32 [[J]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[J_NEXT]], 15 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/metadata.ll b/llvm/test/Transforms/LoopVectorize/metadata.ll index e2dadff..3c59a27 100644 --- a/llvm/test/Transforms/LoopVectorize/metadata.ll +++ b/llvm/test/Transforms/LoopVectorize/metadata.ll @@ -142,18 +142,6 @@ define void @widen_call_range(ptr noalias %a, ptr readonly %b) { ; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] 
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4, !tbaa [[CHAR_TBAA0]], !range [[RNG9:![0-9]+]] -; CHECK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR1:[0-9]+]], !range [[RNG9]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -180,18 +168,6 @@ define void @widen_call_range(ptr noalias %a, ptr readonly %b) { ; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; INTERLEAVE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4, !tbaa [[CHAR_TBAA0]], !range [[RNG9:![0-9]+]] -; INTERLEAVE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR1:[0-9]+]], !range [[RNG9]] -; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; INTERLEAVE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 -; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; INTERLEAVE: [[EXIT]]: ; INTERLEAVE-NEXT: ret void ; @@ -229,21 +205,9 @@ define void @widen_call_fpmath(ptr noalias %a, ptr readonly %b) { ; CHECK-NEXT: store <2 x double> [[TMP1]], ptr [[TMP3]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = 
add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8, !tbaa [[CHAR_TBAA0]] -; CHECK-NEXT: [[CALL:%.*]] = call double @bar(double [[LOAD]]) #[[ATTR2:[0-9]+]], !fpmath [[META3]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -267,21 +231,9 @@ define void @widen_call_fpmath(ptr noalias %a, ptr readonly %b) { ; INTERLEAVE-NEXT: store <2 x double> [[TMP4]], ptr [[TMP7]], align 8 ; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: 
[[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[IV]] -; INTERLEAVE-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8, !tbaa [[CHAR_TBAA0]] -; INTERLEAVE-NEXT: [[CALL:%.*]] = call double @bar(double [[LOAD]]) #[[ATTR2:[0-9]+]], !fpmath [[META3]] -; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]] -; INTERLEAVE-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8 -; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; INTERLEAVE: [[EXIT]]: ; INTERLEAVE-NEXT: ret void ; @@ -319,21 +271,9 @@ define void @widen_intrinsic(ptr noalias %a, ptr readonly %b) { ; CHECK-NEXT: store <2 x i64> [[TMP2]], ptr [[TMP3]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4 -; CHECK-NEXT: [[CALL:%.*]] = call i64 @llvm.abs.i64(i64 [[LOAD]], i1 true), !range [[RNG9]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void 
; @@ -357,21 +297,9 @@ define void @widen_intrinsic(ptr noalias %a, ptr readonly %b) { ; INTERLEAVE-NEXT: store <2 x i64> [[TMP4]], ptr [[TMP7]], align 4 ; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; INTERLEAVE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4 -; INTERLEAVE-NEXT: [[CALL:%.*]] = call i64 @llvm.abs.i64(i64 [[LOAD]], i1 true), !range [[RNG9]] -; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; INTERLEAVE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 -; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; INTERLEAVE: [[EXIT]]: ; INTERLEAVE-NEXT: ret void ; @@ -409,21 +337,9 @@ define void @widen_intrinsic_fpmath(ptr noalias %a, ptr readonly %b) { ; CHECK-NEXT: store <2 x double> [[TMP1]], ptr [[TMP3]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] 
-; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8, !tbaa [[CHAR_TBAA0]] -; CHECK-NEXT: [[CALL:%.*]] = call double @llvm.sin.f64(double [[LOAD]]) #[[ATTR2]], !fpmath [[META3]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -447,21 +363,9 @@ define void @widen_intrinsic_fpmath(ptr noalias %a, ptr readonly %b) { ; INTERLEAVE-NEXT: store <2 x double> [[TMP4]], ptr [[TMP7]], align 8 ; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[IV]] -; INTERLEAVE-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8, !tbaa [[CHAR_TBAA0]] -; INTERLEAVE-NEXT: [[CALL:%.*]] = call double @llvm.sin.f64(double [[LOAD]]) #[[ATTR2]], !fpmath [[META3]] -; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]] -; 
INTERLEAVE-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8 -; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; INTERLEAVE: [[EXIT]]: ; INTERLEAVE-NEXT: ret void ; @@ -506,7 +410,7 @@ define void @unknown_metadata(ptr nocapture %a, ptr noalias %b, i64 %size) { ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[VEC_IND_NEXT2]] = add <2 x i32> [[TMP3]], splat (i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SIZE]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -522,7 +426,7 @@ define void @unknown_metadata(ptr nocapture %a, ptr noalias %b, i64 %size) { ; CHECK-NEXT: store ptr [[ARRAYIDX_2]], ptr [[ARRAYIDX_1]], align 8, !custom_md [[META2]] ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !custom_md [[META2]] ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[SIZE]], !custom_md [[META2]] -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP14:![0-9]+]], !custom_md [[META2]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP13:![0-9]+]], !custom_md [[META2]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -555,7 +459,7 @@ define void @unknown_metadata(ptr nocapture %a, ptr noalias %b, i64 %size) { ; INTERLEAVE-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2) ; INTERLEAVE-NEXT: [[VEC_IND_NEXT2]] = add <2 x i32> [[STEP_ADD3]], splat (i32 2) ; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; INTERLEAVE-NEXT: 
br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SIZE]], [[N_VEC]] ; INTERLEAVE-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -571,7 +475,7 @@ define void @unknown_metadata(ptr nocapture %a, ptr noalias %b, i64 %size) { ; INTERLEAVE-NEXT: store ptr [[ARRAYIDX_2]], ptr [[ARRAYIDX_1]], align 8, !custom_md [[META2]] ; INTERLEAVE-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !custom_md [[META2]] ; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[SIZE]], !custom_md [[META2]] -; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP14:![0-9]+]], !custom_md [[META2]] +; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP13:![0-9]+]], !custom_md [[META2]] ; INTERLEAVE: [[EXIT]]: ; INTERLEAVE-NEXT: ret void ; @@ -617,12 +521,11 @@ attributes #1 = { nounwind "vector-function-abi-variant"="_ZGV_LLVM_N2v_bar(bar_ ; CHECK: [[META6]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META6]], [[META5]]} ; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META5]], [[META6]]} -; CHECK: [[RNG9]] = !{i64 0, i64 2} +; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META5]], [[META6]]} ; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META5]], [[META6]]} ; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META5]], [[META6]]} ; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META5]], [[META6]]} -; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META5]], [[META6]]} -; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META6]], [[META5]]} +; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META6]], [[META5]]} ;. 
; INTERLEAVE: [[CHAR_TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0, i64 0} ; INTERLEAVE: [[META1]] = !{!"omnipotent char", [[META2]]} @@ -633,10 +536,9 @@ attributes #1 = { nounwind "vector-function-abi-variant"="_ZGV_LLVM_N2v_bar(bar_ ; INTERLEAVE: [[META6]] = !{!"llvm.loop.unroll.runtime.disable"} ; INTERLEAVE: [[LOOP7]] = distinct !{[[LOOP7]], [[META6]], [[META5]]} ; INTERLEAVE: [[LOOP8]] = distinct !{[[LOOP8]], [[META5]], [[META6]]} -; INTERLEAVE: [[RNG9]] = !{i64 0, i64 2} +; INTERLEAVE: [[LOOP9]] = distinct !{[[LOOP9]], [[META5]], [[META6]]} ; INTERLEAVE: [[LOOP10]] = distinct !{[[LOOP10]], [[META5]], [[META6]]} ; INTERLEAVE: [[LOOP11]] = distinct !{[[LOOP11]], [[META5]], [[META6]]} ; INTERLEAVE: [[LOOP12]] = distinct !{[[LOOP12]], [[META5]], [[META6]]} -; INTERLEAVE: [[LOOP13]] = distinct !{[[LOOP13]], [[META5]], [[META6]]} -; INTERLEAVE: [[LOOP14]] = distinct !{[[LOOP14]], [[META6]], [[META5]]} +; INTERLEAVE: [[LOOP13]] = distinct !{[[LOOP13]], [[META6]], [[META5]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/minimumnum-maximumnum-reductions.ll b/llvm/test/Transforms/LoopVectorize/minimumnum-maximumnum-reductions.ll index 7866728..47a2a84 100644 --- a/llvm/test/Transforms/LoopVectorize/minimumnum-maximumnum-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/minimumnum-maximumnum-reductions.ll @@ -26,20 +26,8 @@ define float @maximumnum_intrinsic(ptr readonly %x) { ; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call <2 x float> @llvm.maximumnum.v2f32(<2 x float> [[TMP3]], <2 x float> [[TMP4]]) ; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.vector.reduce.fmax.v2f32(<2 x float> [[RDX_MINMAX]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, 
ptr [[X]], i32 [[IV1]] -; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP1]], align 4 -; CHECK-NEXT: [[RED_NEXT]] = tail call float @llvm.maximumnum.f32(float [[RED]], float [[L]]) -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[IV1]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[INC]], 1024 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[TMP6]] ; entry: br label %loop @@ -82,20 +70,8 @@ define float @maximumnum_intrinsic_fast(ptr readonly %x) { ; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call fast <2 x float> @llvm.maximumnum.v2f32(<2 x float> [[TMP3]], <2 x float> [[TMP4]]) ; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fmax.v2f32(<2 x float> [[RDX_MINMAX]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[IV1]] -; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP1]], align 4 -; CHECK-NEXT: [[RED_NEXT]] = tail call fast float @llvm.maximumnum.f32(float [[RED]], float [[L]]) -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[IV1]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[INC]], 1024 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[TMP6]] ; entry: br label %loop @@ -138,20 +114,8 @@ define float @minimumnum_intrinsic(ptr readonly %x) { ; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call <2 x float> @llvm.minimumnum.v2f32(<2 x 
float> [[TMP3]], <2 x float> [[TMP4]]) ; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.vector.reduce.fmin.v2f32(<2 x float> [[RDX_MINMAX]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[IV1]] -; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP1]], align 4 -; CHECK-NEXT: [[RED_NEXT]] = tail call float @llvm.minimumnum.f32(float [[RED]], float [[L]]) -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[IV1]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[INC]], 1024 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[TMP6]] ; entry: br label %loop @@ -194,20 +158,8 @@ define float @minimumnum_intrinsic_fast(ptr readonly %x) { ; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call fast <2 x float> @llvm.minimumnum.v2f32(<2 x float> [[TMP3]], <2 x float> [[TMP4]]) ; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fmin.v2f32(<2 x float> [[RDX_MINMAX]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[IV1]] -; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP1]], align 4 -; CHECK-NEXT: [[RED_NEXT]] = tail call fast float @llvm.minimumnum.f32(float [[RED]], float [[L]]) -; CHECK-NEXT: [[INC]] = add nuw nsw i32 
[[IV1]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[INC]], 1024 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[TMP6]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll index 2e88ff6..a1fc1b8 100644 --- a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll +++ b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll @@ -34,10 +34,6 @@ define i32 @main() #0 { ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 0 ; diff --git a/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll index d928a4b..b19f9c5 100644 --- a/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll @@ -12,14 +12,7 @@ define void @sincos_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noali ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1 ; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4 ; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float 
} [[CALL]], 1 -; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 -; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 ; CHECK: [[EXIT:.*:]] ; entry: @@ -55,14 +48,7 @@ define void @sincos_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noali ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1 ; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8 ; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincos.f64(double [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1 -; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8 -; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8 ; CHECK: [[EXIT:.*:]] ; entry: @@ -91,9 +77,9 @@ define void @predicated_sincos(float %x, ptr noalias %in, ptr noalias writeonly ; CHECK-LABEL: define void @predicated_sincos( ; CHECK-SAME: float [[X:%.*]], ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) { ; CHECK: [[ENTRY:.*:]] -; CHECK: [[VECTOR_BODY1:.*]]: -; CHECK: [[VECTOR_BODY:.*:]] -; CHECK: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_BODY1]] ], [ [[INDEX_NEXT:%.*]], %[[IF_THEN2:.*]] ] +; CHECK: [[VECTOR_BODY:.*]]: +; CHECK: [[VECTOR_BODY1:.*:]] +; CHECK: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_BODY]] ], [ [[INDEX_NEXT:%.*]], %[[IF_THEN1:.*]] ] ; CHECK: [[TMP4:%.*]] = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> [[WIDE_LOAD:%.*]]) ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP4]], 0 ; CHECK: [[TMP6:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP4]], 1 @@ -107,23 +93,14 @@ define void @predicated_sincos(float %x, ptr noalias %in, ptr noalias writeonly ; CHECK: br label 
%[[PRED_STORE_CONTINUE]] ; CHECK: [[PRED_STORE_CONTINUE]]: ; CHECK: [[TMP12:%.*]] = extractelement <2 x i1> [[TMP3]], i32 1 -; CHECK: br i1 [[TMP12]], label %[[PRED_STORE_IF1:.*]], label %[[IF_THEN2]] +; CHECK: br i1 [[TMP12]], label %[[PRED_STORE_IF1:.*]], label %[[IF_THEN1]] ; CHECK: [[PRED_STORE_IF1]]: ; CHECK: [[TMP15:%.*]] = extractelement <2 x float> [[TMP5]], i32 1 ; CHECK: store float [[TMP15]], ptr [[TMP14:%.*]], align 4 ; CHECK: [[TMP17:%.*]] = extractelement <2 x float> [[TMP6]], i32 1 ; CHECK: store float [[TMP17]], ptr [[TMP16:%.*]], align 4 -; CHECK: br label %[[IF_THEN2]] -; CHECK: [[IF_THEN2]]: -; CHECK: [[IF_THEN:.*:]] -; CHECK: [[IF_THEN3:.*:]] -; CHECK: [[IF_THEN4:.*:]] -; CHECK: [[IF_THEN1:.*:]] -; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 -; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 +; CHECK: br label %[[IF_THEN1]] +; CHECK: [[IF_THEN1]]: ; CHECK: [[IF_MERGE:.*:]] ; CHECK: [[FOR_END:.*:]] ; @@ -167,14 +144,7 @@ define void @modf_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1 ; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4 ; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.modf.f32(float [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 -; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 ; CHECK: [[EXIT:.*:]] ; entry: @@ 
-210,14 +180,7 @@ define void @modf_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1 ; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8 ; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.modf.f64(double [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1 -; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8 -; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8 ; CHECK: [[EXIT:.*:]] ; entry: @@ -253,14 +216,7 @@ define void @sincospi_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noa ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1 ; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4 ; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincospi.f32(float [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 -; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 ; CHECK: [[EXIT:.*:]] ; entry: @@ -296,14 +252,7 @@ define void @sincospi_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noa ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1 ; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8 ; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; 
CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincospi.f64(double [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1 -; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8 -; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8 ; CHECK: [[EXIT:.*:]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/noalias-scope-decl.ll b/llvm/test/Transforms/LoopVectorize/noalias-scope-decl.ll index 9b6774e..481fa04 100644 --- a/llvm/test/Transforms/LoopVectorize/noalias-scope-decl.ll +++ b/llvm/test/Transforms/LoopVectorize/noalias-scope-decl.ll @@ -26,20 +26,6 @@ define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP1:%.*]] = fcmp ogt float [[TMP7]], 1.000000e+02 -; CHECK-NEXT: tail call void @llvm.experimental.noalias.scope.decl(metadata [[META0]]) -; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP7]], 1.000000e+00 -; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX5]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 1599 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret void ; diff --git 
a/llvm/test/Transforms/LoopVectorize/optsize.ll b/llvm/test/Transforms/LoopVectorize/optsize.ll index 819cfaa..9f82795 100644 --- a/llvm/test/Transforms/LoopVectorize/optsize.ll +++ b/llvm/test/Transforms/LoopVectorize/optsize.ll @@ -273,19 +273,8 @@ define void @pr43371() optsize { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_COND_CLEANUP28:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY29:.*]] ; CHECK: [[FOR_COND_CLEANUP28]]: ; CHECK-NEXT: unreachable -; CHECK: [[FOR_BODY29]]: -; CHECK-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ] -; CHECK-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]] -; CHECK-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32 -; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]] -; CHECK-NEXT: store i16 0, ptr [[ARRAYIDX35]], align 1 -; CHECK-NEXT: [[INC37]] = add i16 [[I24_0170]], 1 -; CHECK-NEXT: [[CMP26:%.*]] = icmp ult i16 [[INC37]], 756 -; CHECK-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]] ; ; PGSO-LABEL: define void @pr43371( ; PGSO-SAME: ) #[[ATTR0]] { @@ -310,19 +299,8 @@ define void @pr43371() optsize { ; PGSO-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; PGSO: [[MIDDLE_BLOCK]]: ; PGSO-NEXT: br label %[[FOR_COND_CLEANUP28:.*]] -; PGSO: [[SCALAR_PH:.*]]: -; PGSO-NEXT: br label %[[FOR_BODY29:.*]] ; PGSO: [[FOR_COND_CLEANUP28]]: ; PGSO-NEXT: unreachable -; PGSO: [[FOR_BODY29]]: -; PGSO-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ] -; PGSO-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]] -; PGSO-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32 -; PGSO-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]] -; PGSO-NEXT: 
store i16 0, ptr [[ARRAYIDX35]], align 1 -; PGSO-NEXT: [[INC37]] = add i16 [[I24_0170]], 1 -; PGSO-NEXT: [[CMP26:%.*]] = icmp ult i16 [[INC37]], 756 -; PGSO-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]] ; ; NPGSO-LABEL: define void @pr43371( ; NPGSO-SAME: ) #[[ATTR0]] { @@ -347,19 +325,8 @@ define void @pr43371() optsize { ; NPGSO-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; NPGSO: [[MIDDLE_BLOCK]]: ; NPGSO-NEXT: br label %[[FOR_COND_CLEANUP28:.*]] -; NPGSO: [[SCALAR_PH:.*]]: -; NPGSO-NEXT: br label %[[FOR_BODY29:.*]] ; NPGSO: [[FOR_COND_CLEANUP28]]: ; NPGSO-NEXT: unreachable -; NPGSO: [[FOR_BODY29]]: -; NPGSO-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ] -; NPGSO-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]] -; NPGSO-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32 -; NPGSO-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]] -; NPGSO-NEXT: store i16 0, ptr [[ARRAYIDX35]], align 1 -; NPGSO-NEXT: [[INC37]] = add i16 [[I24_0170]], 1 -; NPGSO-NEXT: [[CMP26:%.*]] = icmp ult i16 [[INC37]], 756 -; NPGSO-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]] ; ; We do not want to generate SCEV predicates when optimising for size, because ; that will lead to extra code generation such as the SCEV overflow runtime @@ -407,19 +374,8 @@ define void @pr43371_pgso() !prof !14 { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_COND_CLEANUP28:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY29:.*]] ; CHECK: [[FOR_COND_CLEANUP28]]: ; CHECK-NEXT: unreachable -; CHECK: [[FOR_BODY29]]: -; CHECK-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ] -; CHECK-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]] -; 
CHECK-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32 -; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]] -; CHECK-NEXT: store i16 0, ptr [[ARRAYIDX35]], align 1 -; CHECK-NEXT: [[INC37]] = add i16 [[I24_0170]], 1 -; CHECK-NEXT: [[CMP26:%.*]] = icmp ult i16 [[INC37]], 756 -; CHECK-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]] ; ; PGSO-LABEL: define void @pr43371_pgso( ; PGSO-SAME: ) !prof [[PROF14]] { @@ -444,19 +400,8 @@ define void @pr43371_pgso() !prof !14 { ; PGSO-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; PGSO: [[MIDDLE_BLOCK]]: ; PGSO-NEXT: br label %[[FOR_COND_CLEANUP28:.*]] -; PGSO: [[SCALAR_PH:.*]]: -; PGSO-NEXT: br label %[[FOR_BODY29:.*]] ; PGSO: [[FOR_COND_CLEANUP28]]: ; PGSO-NEXT: unreachable -; PGSO: [[FOR_BODY29]]: -; PGSO-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ] -; PGSO-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]] -; PGSO-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32 -; PGSO-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]] -; PGSO-NEXT: store i16 0, ptr [[ARRAYIDX35]], align 1 -; PGSO-NEXT: [[INC37]] = add i16 [[I24_0170]], 1 -; PGSO-NEXT: [[CMP26:%.*]] = icmp ult i16 [[INC37]], 756 -; PGSO-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]] ; ; NPGSO-LABEL: define void @pr43371_pgso( ; NPGSO-SAME: ) !prof [[PROF14]] { @@ -686,16 +631,6 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize { ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: 
[[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]] -; CHECK-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]] -; CHECK-NEXT: store i16 42, ptr [[GEPOFB]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret void ; @@ -734,16 +669,6 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize { ; PGSO-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; PGSO: [[MIDDLE_BLOCK]]: ; PGSO-NEXT: br label %[[FOR_END:.*]] -; PGSO: [[SCALAR_PH:.*]]: -; PGSO-NEXT: br label %[[FOR_BODY:.*]] -; PGSO: [[FOR_BODY]]: -; PGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; PGSO-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]] -; PGSO-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]] -; PGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4 -; PGSO-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; PGSO-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025 -; PGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; PGSO: [[FOR_END]]: ; PGSO-NEXT: ret void ; @@ -782,16 +707,6 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize { ; NPGSO-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; NPGSO: [[MIDDLE_BLOCK]]: ; NPGSO-NEXT: br label %[[FOR_END:.*]] -; NPGSO: [[SCALAR_PH:.*]]: -; NPGSO-NEXT: br label %[[FOR_BODY:.*]] -; NPGSO: [[FOR_BODY]]: -; NPGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; NPGSO-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]] -; NPGSO-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]] -; NPGSO-NEXT: store i16 42, ptr [[GEPOFB]], 
align 4 -; NPGSO-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; NPGSO-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025 -; NPGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; NPGSO: [[FOR_END]]: ; NPGSO-NEXT: ret void ; @@ -830,7 +745,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; CHECK-NEXT: store <2 x i16> splat (i16 42), ptr [[TMP1]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[TMP0]], 2 ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: @@ -843,7 +758,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; CHECK-NEXT: store i16 42, ptr [[GEPOFB]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret void ; @@ -862,7 +777,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; PGSO-NEXT: store <2 x i16> splat (i16 42), ptr [[TMP1]], align 4 ; PGSO-NEXT: [[INDEX_NEXT]] = add nuw i32 [[TMP0]], 2 ; PGSO-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024 -; PGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; PGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; PGSO: [[MIDDLE_BLOCK]]: ; PGSO-NEXT: br label %[[SCALAR_PH]] ; PGSO: [[SCALAR_PH]]: @@ -875,7 +790,7 @@ define 
void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; PGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4 ; PGSO-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 ; PGSO-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025 -; PGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; PGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; PGSO: [[FOR_END]]: ; PGSO-NEXT: ret void ; @@ -894,7 +809,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; NPGSO-NEXT: store <2 x i16> splat (i16 42), ptr [[TMP1]], align 4 ; NPGSO-NEXT: [[INDEX_NEXT]] = add nuw i32 [[TMP0]], 2 ; NPGSO-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024 -; NPGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; NPGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; NPGSO: [[MIDDLE_BLOCK]]: ; NPGSO-NEXT: br label %[[SCALAR_PH]] ; NPGSO: [[SCALAR_PH]]: @@ -907,7 +822,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; NPGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4 ; NPGSO-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 ; NPGSO-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025 -; NPGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; NPGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; NPGSO: [[FOR_END]]: ; NPGSO-NEXT: ret void ; @@ -1092,10 +1007,8 @@ exit: ; CHECK: [[META17]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META16]], [[META17]]} ; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META16]], [[META17]]} -; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META21:![0-9]+]]} -; CHECK: [[META21]] = !{!"llvm.loop.vectorize.enable", i1 true} -; CHECK: [[LOOP22]] = distinct 
!{[[LOOP22]], [[META16]], [[META17]]} -; CHECK: [[LOOP23]] = distinct !{[[LOOP23]], [[META16]]} +; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META16]], [[META17]]} +; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META16]]} ;. ; PGSO: [[PROF14]] = !{!"function_entry_count", i64 0} ; PGSO: [[LOOP15]] = distinct !{[[LOOP15]], [[META16:![0-9]+]], [[META17:![0-9]+]]} @@ -1103,10 +1016,8 @@ exit: ; PGSO: [[META17]] = !{!"llvm.loop.unroll.runtime.disable"} ; PGSO: [[LOOP18]] = distinct !{[[LOOP18]], [[META16]], [[META17]]} ; PGSO: [[LOOP19]] = distinct !{[[LOOP19]], [[META16]], [[META17]]} -; PGSO: [[LOOP20]] = distinct !{[[LOOP20]], [[META21:![0-9]+]]} -; PGSO: [[META21]] = !{!"llvm.loop.vectorize.enable", i1 true} -; PGSO: [[LOOP22]] = distinct !{[[LOOP22]], [[META16]], [[META17]]} -; PGSO: [[LOOP23]] = distinct !{[[LOOP23]], [[META16]]} +; PGSO: [[LOOP20]] = distinct !{[[LOOP20]], [[META16]], [[META17]]} +; PGSO: [[LOOP21]] = distinct !{[[LOOP21]], [[META16]]} ;. ; NPGSO: [[PROF14]] = !{!"function_entry_count", i64 0} ; NPGSO: [[LOOP15]] = distinct !{[[LOOP15]], [[META16:![0-9]+]], [[META17:![0-9]+]]} @@ -1119,8 +1030,6 @@ exit: ; NPGSO: [[LOOP22]] = distinct !{[[LOOP22]], [[META16]], [[META17]]} ; NPGSO: [[LOOP23]] = distinct !{[[LOOP23]], [[META17]], [[META16]]} ; NPGSO: [[LOOP24]] = distinct !{[[LOOP24]], [[META16]], [[META17]]} -; NPGSO: [[LOOP25]] = distinct !{[[LOOP25]], [[META26:![0-9]+]]} -; NPGSO: [[META26]] = !{!"llvm.loop.vectorize.enable", i1 true} -; NPGSO: [[LOOP27]] = distinct !{[[LOOP27]], [[META16]], [[META17]]} -; NPGSO: [[LOOP28]] = distinct !{[[LOOP28]], [[META16]]} +; NPGSO: [[LOOP25]] = distinct !{[[LOOP25]], [[META16]], [[META17]]} +; NPGSO: [[LOOP26]] = distinct !{[[LOOP26]], [[META16]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/phi-cost.ll b/llvm/test/Transforms/LoopVectorize/phi-cost.ll index bf5631c..7b5d0b6 100644 --- a/llvm/test/Transforms/LoopVectorize/phi-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/phi-cost.ll @@ -185,13 +185,9 @@ define i32 @red_phi_0(i32 %start, ptr %src) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 -; CHECK-NEXT: br i1 [[TMP1]], label %[[SCALAR_PH:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] -; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: br i1 [[TMP1]], label %[[SCALAR_PH1:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK: [[SCALAR_PH1]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH1:.*:]] -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: br i1 poison, label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[START]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[TMP0]]) diff --git a/llvm/test/Transforms/LoopVectorize/pr154045-dont-fold-extractelement-livein.ll b/llvm/test/Transforms/LoopVectorize/pr154045-dont-fold-extractelement-livein.ll index a2563256..f2d6834 100644 --- a/llvm/test/Transforms/LoopVectorize/pr154045-dont-fold-extractelement-livein.ll +++ b/llvm/test/Transforms/LoopVectorize/pr154045-dont-fold-extractelement-livein.ll @@ -29,22 +29,6 @@ define void @pr154045(ptr %p, i1 %c, i64 %x) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; CHECK-NEXT: br i1 [[C]], label %[[LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: 
-; CHECK-NEXT: [[REM:%.*]] = srem i64 0, [[X]] -; CHECK-NEXT: br label %[[LATCH]] -; CHECK: [[LATCH]]: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[REM]], %[[ELSE]] ], [ 0, %[[LOOP]] ] -; CHECK-NEXT: [[PHI_TRUNC:%.*]] = trunc i64 [[PHI]] to i32 -; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[PHI_TRUNC]], 0 -; CHECK-NEXT: store i32 [[SHL]], ptr [[P]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/pr32859.ll b/llvm/test/Transforms/LoopVectorize/pr32859.ll index a29a6bd..2d30e0c 100644 --- a/llvm/test/Transforms/LoopVectorize/pr32859.ll +++ b/llvm/test/Transforms/LoopVectorize/pr32859.ll @@ -10,7 +10,7 @@ ; CHECK: %e.0.ph = phi i32 [ 0, %if.end.2.i ], [ 0, %middle.block ] ; Function Attrs: nounwind uwtable -define void @main() #0 { +define void @main(i32 %n) #0 { entry: br label %for.cond1.preheader.i @@ -21,7 +21,7 @@ for.cond1.preheader.i: ; preds = %if.end.2.i, %entry if.end.2.i: ; preds = %for.cond1.preheader.i %inc5.i = add nsw i32 %c.06.i, 1 - %cmp.i = icmp slt i32 %inc5.i, 16 + %cmp.i = icmp slt i32 %inc5.i, %n br i1 %cmp.i, label %for.cond1.preheader.i, label %for.cond.preheader for.cond.preheader: ; preds = %if.end.2.i diff --git a/llvm/test/Transforms/LoopVectorize/pr36983-multiple-lcssa.ll b/llvm/test/Transforms/LoopVectorize/pr36983-multiple-lcssa.ll index b0e2ae6..98963a7 100644 --- a/llvm/test/Transforms/LoopVectorize/pr36983-multiple-lcssa.ll +++ b/llvm/test/Transforms/LoopVectorize/pr36983-multiple-lcssa.ll @@ -20,18 +20,8 @@ define i16 @duplicate_lcssa(i16 %val) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI1:%.*]] = extractelement <4 x i16> [[TMP0]], i32 2 ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, 
%[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RES:%.*]] = phi i16 [ [[VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = sub nsw i16 [[IV]], 1 -; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp ne i16 [[IV_NEXT]], 0 -; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[LOOP]], label %[[EXIT]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[LCSSA_1:%.*]] = phi i16 [ [[RES]], %[[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI1]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[LCSSA_2:%.*]] = phi i16 [ [[RES]], %[[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI1]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i16 [[LCSSA_2]] +; CHECK-NEXT: ret i16 [[VECTOR_RECUR_EXTRACT_FOR_PHI1]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/pr44488-predication.ll b/llvm/test/Transforms/LoopVectorize/pr44488-predication.ll index d1b912d..a1cb361 100644 --- a/llvm/test/Transforms/LoopVectorize/pr44488-predication.ll +++ b/llvm/test/Transforms/LoopVectorize/pr44488-predication.ll @@ -43,26 +43,7 @@ define i16 @test_true_and_false_branch_equal() { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], 12 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_07:%.*]] = phi i16 [ 99, [[SCALAR_PH:%.*]] ], [ [[INC7:%.*]], [[FOR_LATCH:%.*]] ] -; CHECK-NEXT: [[LV:%.*]] = load i16, ptr @v_38, align 1 -; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i16 [[LV]], 32767 -; CHECK-NEXT: br i1 [[CMP1]], label [[COND_END:%.*]], label [[COND_END]] -; CHECK: cond.end: -; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i16 [[LV]], 0 -; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_LATCH]], label [[COND_FALSE4:%.*]] -; CHECK: cond.false4: -; CHECK-NEXT: [[REM:%.*]] = srem i16 5786, [[LV]] -; CHECK-NEXT: br label [[FOR_LATCH]] -; CHECK: for.latch: -; CHECK-NEXT: [[COND6:%.*]] = phi i16 [ [[REM]], 
[[COND_FALSE4]] ], [ 5786, [[COND_END]] ] -; CHECK-NEXT: store i16 [[COND6]], ptr @v_39, align 1 -; CHECK-NEXT: [[INC7]] = add nsw i16 [[I_07]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i16 [[INC7]], 111 -; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[EXIT]] +; CHECK-NEXT: br label [[FOR_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: [[RV:%.*]] = load i16, ptr @v_39, align 1 ; CHECK-NEXT: ret i16 [[RV]] diff --git a/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll index 8450db6..9ed35fb 100644 --- a/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll +++ b/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll @@ -57,16 +57,7 @@ define void @pr45679(ptr %A) { ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16 ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]] -; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 14 -; CHECK-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -118,16 +109,7 @@ define void @pr45679(ptr %A) { ; VF2UF2-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16 ; VF2UF2-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VF2UF2: middle.block: -; VF2UF2-NEXT: br label [[EXIT:%.*]] -; VF2UF2: scalar.ph: ; VF2UF2-NEXT: br label [[LOOP:%.*]] -; VF2UF2: loop: -; VF2UF2-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] 
] -; VF2UF2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]] -; VF2UF2-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1 -; VF2UF2-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1 -; VF2UF2-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 14 -; VF2UF2-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP]] ; VF2UF2: exit: ; VF2UF2-NEXT: ret void ; @@ -174,16 +156,7 @@ define void @pr45679(ptr %A) { ; VF1UF4-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16 ; VF1UF4-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VF1UF4: middle.block: -; VF1UF4-NEXT: br label [[EXIT:%.*]] -; VF1UF4: scalar.ph: ; VF1UF4-NEXT: br label [[LOOP:%.*]] -; VF1UF4: loop: -; VF1UF4-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ] -; VF1UF4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]] -; VF1UF4-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1 -; VF1UF4-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1 -; VF1UF4-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 14 -; VF1UF4-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP]] ; VF1UF4: exit: ; VF1UF4-NEXT: ret void ; @@ -253,17 +226,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) { ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: store i64 [[V]], ptr [[B]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 
14 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -319,17 +282,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) { ; VF2UF2-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 ; VF2UF2-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; VF2UF2: middle.block: -; VF2UF2-NEXT: br label [[FOR_END:%.*]] -; VF2UF2: scalar.ph: ; VF2UF2-NEXT: br label [[FOR_BODY:%.*]] -; VF2UF2: for.body: -; VF2UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; VF2UF2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; VF2UF2-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; VF2UF2-NEXT: store i64 [[V]], ptr [[B]], align 8 -; VF2UF2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; VF2UF2-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 14 -; VF2UF2-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; VF2UF2: for.end: ; VF2UF2-NEXT: ret void ; @@ -380,17 +333,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) { ; VF1UF4-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 ; VF1UF4-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; VF1UF4: middle.block: -; VF1UF4-NEXT: br label [[FOR_END:%.*]] -; VF1UF4: scalar.ph: ; VF1UF4-NEXT: br label [[FOR_BODY:%.*]] -; VF1UF4: for.body: -; VF1UF4-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; VF1UF4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; VF1UF4-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; VF1UF4-NEXT: store i64 [[V]], ptr [[B]], align 8 -; VF1UF4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; VF1UF4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 14 -; VF1UF4-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; VF1UF4: for.end: ; 
VF1UF4-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll b/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll index 673d582..01c6c3f 100644 --- a/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll +++ b/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll @@ -31,23 +31,13 @@ define void @test(i16 %x, i64 %y, ptr %ptr) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: store i32 0, ptr [[PTR]], align 4 -; CHECK-NEXT: [[V2:%.*]] = trunc i64 [[IV]] to i8 -; CHECK-NEXT: [[V3:%.*]] = add i8 [[V2]], 1 -; CHECK-NEXT: [[CMP15:%.*]] = icmp slt i8 [[V3]], 5 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[INC]] -; CHECK-NEXT: br i1 [[CMP15]], label [[LOOP]], label [[LOOP_EXIT]] ; CHECK: loop.exit: ; CHECK-NEXT: [[DIV_1:%.*]] = udiv i64 [[Y]], [[ADD]] ; CHECK-NEXT: [[V1:%.*]] = add i64 [[DIV_1]], 1 ; CHECK-NEXT: br label [[LOOP_2:%.*]] ; CHECK: loop.2: -; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[IV_NEXT_1:%.*]], [[LOOP_2]] ], [ 0, [[LOOP_EXIT]] ] +; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[IV_NEXT_1:%.*]], [[LOOP_2]] ], [ 0, [[LOOP]] ] ; CHECK-NEXT: [[IV_NEXT_1]] = add i64 [[IV_1]], [[V1]] ; CHECK-NEXT: call void @use(i64 [[IV_NEXT_1]]) ; CHECK-NEXT: [[EC:%.*]] = icmp ult i64 [[IV_NEXT_1]], 200 diff --git a/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll index 75437fe..615ea06 100644 --- a/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll +++ b/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll @@ -61,24 
+61,9 @@ define dso_local i16 @reverse_interleave_load_fold_mask() optsize { ; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP28:%.*]] = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> [[TMP26]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 41, [[SCALAR_PH:%.*]] ], [ [[IVMINUS1:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[PREVSUM:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IVMINUS1]] = add nsw i16 [[IV]], -1 -; CHECK-NEXT: [[GEPA0:%.*]] = getelementptr inbounds [40 x [4 x i16]], ptr @A, i16 0, i16 [[IVMINUS1]], i16 0 -; CHECK-NEXT: [[TMP29:%.*]] = load i16, ptr [[GEPA0]], align 1 -; CHECK-NEXT: [[GEPA3:%.*]] = getelementptr inbounds [40 x [4 x i16]], ptr @A, i16 0, i16 [[IVMINUS1]], i16 3 -; CHECK-NEXT: [[TMP30:%.*]] = load i16, ptr [[GEPA3]], align 1 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i16 [[TMP29]], [[TMP30]] -; CHECK-NEXT: [[PREVSUM]] = add nsw i16 [[SUM]], [[ADD]] -; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i16 [[IV]], 1 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: -; CHECK-NEXT: [[PREVSUM_LCSSA:%.*]] = phi i16 [ [[PREVSUM]], [[LOOP]] ], [ [[TMP28]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i16 [[PREVSUM_LCSSA]] +; CHECK-NEXT: ret i16 [[TMP28]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll b/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll index 637b4ab..7b35009 100644 --- a/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll +++ b/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll @@ -33,31 +33,9 @@ define i32 @test(i32 %a, i1 %c.1, i1 %c.2 ) #0 { ; CHECK: middle.block: ; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[PREDPHI7]]) ; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x 
i32> [[PREDPHI5]], i32 1 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 6, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[V_2:%.*]] = phi i32 [ 35902, [[SCALAR_PH]] ], [ [[P_2:%.*]], [[LOOP_LATCH]] ] -; CHECK-NEXT: br i1 [[C_2]], label [[LOOP_LATCH]], label [[BODY_1:%.*]] -; CHECK: body.1: -; CHECK-NEXT: [[V_2_ADD:%.*]] = add i32 [[V_2]], 10 -; CHECK-NEXT: br i1 [[C_1]], label [[LOOP_LATCH]], label [[BODY_2:%.*]] -; CHECK: body.2: -; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[V_2_ADD]], 20 -; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A]], 1 -; CHECK-NEXT: [[ADD_2:%.*]] = add i32 [[ADD_1]], [[XOR]] -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[P_1:%.*]] = phi i32 [ [[IV]], [[LOOP_HEADER]] ], [ 9, [[BODY_1]] ], [ 9, [[BODY_2]] ] -; CHECK-NEXT: [[P_2]] = phi i32 [ [[V_2]], [[LOOP_HEADER]] ], [ [[V_2_ADD]], [[BODY_1]] ], [ [[ADD_2]], [[BODY_2]] ] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp ult i32 [[IV]], 181 -; CHECK-NEXT: br i1 [[EC]], label [[LOOP_HEADER]], label [[EXIT]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[E_1:%.*]] = phi i32 [ [[P_1]], [[LOOP_LATCH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[E_2:%.*]] = phi i32 [ [[P_2]], [[LOOP_LATCH]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[RES:%.*]] = add i32 [[E_1]], [[E_2]] +; CHECK-NEXT: [[RES:%.*]] = add i32 [[TMP9]], [[TMP10]] ; CHECK-NEXT: ret i32 [[RES]] ; bb: diff --git a/llvm/test/Transforms/LoopVectorize/pr66616.ll b/llvm/test/Transforms/LoopVectorize/pr66616.ll index d5b2519..1ef614a 100644 --- a/llvm/test/Transforms/LoopVectorize/pr66616.ll +++ b/llvm/test/Transforms/LoopVectorize/pr66616.ll @@ -18,41 +18,32 @@ define void @pr66616(ptr %ptr) { ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP2]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[PREHEADER:%.*]] -; CHECK: scalar.ph: +; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP0]], [[VECTOR_BODY]] ] ; CHECK-NEXT: br label [[LOOP_1:%.*]] -; CHECK: loop.1: -; CHECK-NEXT: [[IV_1:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[LOOP_1]] ] -; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[PTR]], align 4 -; CHECK-NEXT: [[ADD3:%.*]] = add i32 [[LOAD]], 1 -; CHECK-NEXT: [[INC]] = add i8 [[IV_1]], 1 -; CHECK-NEXT: [[COND1:%.*]] = icmp eq i8 [[INC]], 0 -; CHECK-NEXT: br i1 [[COND1]], label [[PREHEADER]], label [[LOOP_1]] ; CHECK: preheader: -; CHECK-NEXT: [[ADD3_LCSSA:%.*]] = phi i32 [ [[ADD3]], [[LOOP_1]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[TMP4:%.*]] = sub i32 0, [[ADD3_LCSSA]] +; CHECK-NEXT: [[TMP4:%.*]] = sub i32 -1, [[DOTLCSSA]] ; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = add nuw nsw i64 [[TMP5]], 1 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP6]], 4 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH1:%.*]], label [[VECTOR_PH2:%.*]] -; CHECK: vector.ph2: +; CHECK: vector.ph1: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP6]], 4 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP6]], [[N_MOD_VF]] ; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 -; CHECK-NEXT: [[IND_END:%.*]] = add i32 [[ADD3_LCSSA]], [[DOTCAST]] +; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP3]], [[DOTCAST]] ; CHECK-NEXT: [[IND_END5:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[N_VEC]] ; CHECK-NEXT: br label [[VECTOR_BODY3:%.*]] -; CHECK: vector.body3: +; CHECK: vector.body2: ; CHECK-NEXT: [[INDEX8:%.*]] = phi i64 [ 0, [[VECTOR_PH2]] ], [ [[INDEX_NEXT9:%.*]], [[VECTOR_BODY3]] ] ; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX8]], 4 ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK6:%.*]], label [[VECTOR_BODY3]], !llvm.loop 
[[LOOP3:![0-9]+]] -; CHECK: middle.block6: +; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK5:%.*]], label [[VECTOR_BODY3]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: middle.block5: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP6]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH1]] -; CHECK: scalar.ph1: -; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK6]] ], [ [[ADD3_LCSSA]], [[PREHEADER]] ] -; CHECK-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[IND_END5]], [[MIDDLE_BLOCK6]] ], [ [[PTR]], [[PREHEADER]] ] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[TMP8]], [[MIDDLE_BLOCK5]] ], [ [[TMP3]], [[LOOP_1]] ] +; CHECK-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[IND_END5]], [[MIDDLE_BLOCK5]] ], [ [[PTR]], [[LOOP_1]] ] ; CHECK-NEXT: br label [[LOOP_2:%.*]] ; CHECK: loop.2: ; CHECK-NEXT: [[IV_2:%.*]] = phi i32 [ [[IV_2_I:%.*]], [[LOOP_2]] ], [ [[BC_RESUME_VAL4]], [[SCALAR_PH1]] ] diff --git a/llvm/test/Transforms/LoopVectorize/predicate-switch.ll b/llvm/test/Transforms/LoopVectorize/predicate-switch.ll index 70428f0..565e203 100644 --- a/llvm/test/Transforms/LoopVectorize/predicate-switch.ll +++ b/llvm/test/Transforms/LoopVectorize/predicate-switch.ll @@ -425,20 +425,6 @@ define void @switch_all_to_default(ptr %start) { ; IC1-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IC1: [[MIDDLE_BLOCK]]: ; IC1-NEXT: br label %[[EXIT:.*]] -; IC1: [[SCALAR_PH:.*]]: -; IC1-NEXT: br label %[[LOOP_HEADER:.*]] -; IC1: [[LOOP_HEADER]]: -; IC1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; IC1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IC1-NEXT: switch i64 [[IV]], label %[[LOOP_LATCH]] [ -; IC1-NEXT: i64 120, label %[[LOOP_LATCH]] -; IC1-NEXT: i64 100, label %[[LOOP_LATCH]] -; IC1-NEXT: ] -; IC1: [[LOOP_LATCH]]: -; IC1-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[START]], i64 [[IV]] -; IC1-NEXT: store 
i64 42, ptr [[GEP]], align 1 -; IC1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; IC1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; IC1: [[EXIT]]: ; IC1-NEXT: ret void ; @@ -459,20 +445,6 @@ define void @switch_all_to_default(ptr %start) { ; IC2-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IC2: [[MIDDLE_BLOCK]]: ; IC2-NEXT: br label %[[EXIT:.*]] -; IC2: [[SCALAR_PH:.*]]: -; IC2-NEXT: br label %[[LOOP_HEADER:.*]] -; IC2: [[LOOP_HEADER]]: -; IC2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; IC2-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IC2-NEXT: switch i64 [[IV]], label %[[LOOP_LATCH]] [ -; IC2-NEXT: i64 120, label %[[LOOP_LATCH]] -; IC2-NEXT: i64 100, label %[[LOOP_LATCH]] -; IC2-NEXT: ] -; IC2: [[LOOP_LATCH]]: -; IC2-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[START]], i64 [[IV]] -; IC2-NEXT: store i64 42, ptr [[GEP]], align 1 -; IC2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; IC2-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; IC2: [[EXIT]]: ; IC2-NEXT: ret void ; @@ -513,21 +485,6 @@ define void @switch_unconditional(ptr %start) { ; IC1-NEXT: br i1 [[TMP0]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IC1: [[MIDDLE_BLOCK]]: ; IC1-NEXT: br label %[[EXIT:.*]] -; IC1: [[SCALAR_PH:.*]]: -; IC1-NEXT: br label %[[LOOP:.*]] -; IC1: [[LOOP]]: -; IC1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; IC1-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[START]], i64 [[IV]] -; IC1-NEXT: [[X:%.*]] = load i32, ptr [[GEP]], align 4 -; IC1-NEXT: switch i32 [[X]], label %[[FOO:.*]] [ -; IC1-NEXT: ] -; IC1: [[FOO]]: -; IC1-NEXT: br label %[[LATCH]] -; IC1: [[LATCH]]: -; IC1-NEXT: store i32 0, ptr [[GEP]], align 4 -; IC1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IC1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; IC1-NEXT: br i1 [[CMP]], label 
%[[EXIT]], label %[[LOOP]] ; IC1: [[EXIT]]: ; IC1-NEXT: ret void ; @@ -548,21 +505,6 @@ define void @switch_unconditional(ptr %start) { ; IC2-NEXT: br i1 [[TMP0]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IC2: [[MIDDLE_BLOCK]]: ; IC2-NEXT: br label %[[EXIT:.*]] -; IC2: [[SCALAR_PH:.*]]: -; IC2-NEXT: br label %[[LOOP:.*]] -; IC2: [[LOOP]]: -; IC2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; IC2-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[START]], i64 [[IV]] -; IC2-NEXT: [[X:%.*]] = load i32, ptr [[GEP]], align 4 -; IC2-NEXT: switch i32 [[X]], label %[[FOO:.*]] [ -; IC2-NEXT: ] -; IC2: [[FOO]]: -; IC2-NEXT: br label %[[LATCH]] -; IC2: [[LATCH]]: -; IC2-NEXT: store i32 0, ptr [[GEP]], align 4 -; IC2-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IC2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; IC2-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP]] ; IC2: [[EXIT]]: ; IC2-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll b/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll index dfdaaf1..52555d5 100644 --- a/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll +++ b/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll @@ -58,26 +58,6 @@ define void @loop_invariant_store(ptr %p, i64 %a, i8 %b) { ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[ADD]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2 -; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48 -; CHECK-NEXT: [[ASHR:%.*]] = ashr i64 [[SHL]], 52 -; CHECK-NEXT: [[TRUNC_I32:%.*]] = 
trunc i64 [[ASHR]] to i32 -; CHECK-NEXT: br i1 [[CMP_SLT]], label %[[COND_FALSE:.*]], label %[[LOOP_LATCH]] -; CHECK: [[COND_FALSE]]: -; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[B]] to i32 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TRUNC_I32]], %[[LOOP_HEADER]] ], [ [[ZEXT]], %[[COND_FALSE]] ] -; CHECK-NEXT: [[SHL_I32:%.*]] = shl i32 [[COND]], 8 -; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[SHL_I32]] to i8 -; CHECK-NEXT: store i8 [[TRUNC]], ptr [[P]], align 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV]], 8 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -174,28 +154,6 @@ define void @loop_invariant_srem(ptr %p, i64 %a, i8 %b) { ; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i8 [[IV]], 2 -; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48 -; CHECK-NEXT: [[ASHR:%.*]] = ashr i64 [[SHL]], 52 -; CHECK-NEXT: [[TRUNC_I32:%.*]] = trunc i64 [[ASHR]] to i32 -; CHECK-NEXT: br i1 [[CMP_SLT]], label %[[COND_FALSE:.*]], label %[[LOOP_LATCH]] -; CHECK: [[COND_FALSE]]: -; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[B]] to i32 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TRUNC_I32]], %[[LOOP_HEADER]] ], [ [[ZEXT]], %[[COND_FALSE]] ] -; CHECK-NEXT: [[SHL_I32:%.*]] = shl i32 [[COND]], 8 -; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[SHL_I32]] to i8 -; CHECK-NEXT: [[REM:%.*]] = srem i8 [[IV]], [[TRUNC]] -; CHECK-NEXT: [[GEP_P_REM:%.*]] = getelementptr i32, ptr [[P]], i8 [[REM]] -; CHECK-NEXT: store i32 4, 
ptr [[GEP_P_REM]], align 4 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i8 [[IV]], 8 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -245,19 +203,6 @@ define void @loop_invariant_float_store(ptr %p, i32 %a) { ; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2 -; CHECK-NEXT: br i1 [[CMP_SLT]], label %[[COND_FALSE:.*]], label %[[LOOP_LATCH]] -; CHECK: [[COND_FALSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: store float [[TMP10]], ptr [[P]], align 4 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp slt i32 [[IV]], 8 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -315,19 +260,6 @@ define void @test_store_to_invariant_address_needs_mask_due_to_low_trip_count(pt ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ 1, %[[LOOP_HEADER]] ], [ 0, %[[ELSE]] ] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[DST]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 3 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], 
label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-and-loop-metadata.ll b/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-and-loop-metadata.ll index 14526af..6542c42 100644 --- a/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-and-loop-metadata.ll +++ b/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-and-loop-metadata.ll @@ -27,17 +27,6 @@ define void @_Z3fooPf(ptr %a) { ; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[P:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[MUL:%.*]] = fmul float [[P]], 2.000000e+00 -; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret void ; @@ -58,25 +47,8 @@ define void @_Z3fooPf(ptr %a) { ; DEBUGLOC-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG24]], !llvm.loop [[LOOP25:![0-9]+]] ; DEBUGLOC: [[MIDDLE_BLOCK]]: ; DEBUGLOC-NEXT: br label %[[FOR_END:.*]], !dbg [[DBG24]] -; DEBUGLOC: [[SCALAR_PH:.*]]: -; DEBUGLOC-NEXT: br label %[[FOR_BODY:.*]], !dbg [[DBG18]] -; DEBUGLOC: [[FOR_BODY]]: -; DEBUGLOC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], !dbg [[DBG19]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[INDVARS_IV]], 
[[META9:![0-9]+]], !DIExpression(), [[DBG19]]) -; DEBUGLOC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]], !dbg [[DBG20]] -; DEBUGLOC-NEXT: #dbg_value(ptr [[ARRAYIDX]], [[META11:![0-9]+]], !DIExpression(), [[DBG20]]) -; DEBUGLOC-NEXT: [[P:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !dbg [[DBG21]] -; DEBUGLOC-NEXT: #dbg_value(float [[P]], [[META12:![0-9]+]], !DIExpression(), [[DBG21]]) -; DEBUGLOC-NEXT: [[MUL:%.*]] = fmul float [[P]], 2.000000e+00, !dbg [[DBG22]] -; DEBUGLOC-NEXT: #dbg_value(float [[MUL]], [[META14:![0-9]+]], !DIExpression(), [[DBG22]]) -; DEBUGLOC-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4, !dbg [[DBG23]] -; DEBUGLOC-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1, !dbg [[DBG28:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[INDVARS_IV_NEXT]], [[META15:![0-9]+]], !DIExpression(), [[DBG28]]) -; DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024, !dbg [[DBG29:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META16:![0-9]+]], !DIExpression(), [[DBG29]]) -; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !dbg [[DBG24]], !llvm.loop [[LOOP30:![0-9]+]] ; DEBUGLOC: [[FOR_END]]: -; DEBUGLOC-NEXT: ret void, !dbg [[DBG32:![0-9]+]] +; DEBUGLOC-NEXT: ret void, !dbg [[DBG28:![0-9]+]] ; entry: br label %for.body @@ -122,7 +94,7 @@ define void @widen_ptr_induction_dbg(ptr %start, ptr %end) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 32 ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], 
label %[[SCALAR_PH]] @@ -134,54 +106,54 @@ define void @widen_ptr_induction_dbg(ptr %start, ptr %end) { ; CHECK-NEXT: [[IV_NEXT]] = getelementptr inbounds ptr, ptr [[IV]], i64 1 ; CHECK-NEXT: store ptr [[IV]], ptr [[IV]], align 1 ; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq ptr [[IV_NEXT]], [[END]] -; CHECK-NEXT: br i1 [[CMP_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; ; DEBUGLOC-LABEL: define void @widen_ptr_induction_dbg( -; DEBUGLOC-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) !dbg [[DBG33:![0-9]+]] { +; DEBUGLOC-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) !dbg [[DBG29:![0-9]+]] { ; DEBUGLOC-NEXT: [[ENTRY:.*]]: -; DEBUGLOC-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64, !dbg [[DBG38:![0-9]+]] -; DEBUGLOC-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64, !dbg [[DBG38]] -; DEBUGLOC-NEXT: [[TMP0:%.*]] = add i64 [[END1]], -8, !dbg [[DBG38]] -; DEBUGLOC-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]], !dbg [[DBG38]] -; DEBUGLOC-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3, !dbg [[DBG38]] -; DEBUGLOC-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1, !dbg [[DBG38]] -; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 4, !dbg [[DBG38]] -; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]], !dbg [[DBG38]] +; DEBUGLOC-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64, !dbg [[DBG34:![0-9]+]] +; DEBUGLOC-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64, !dbg [[DBG34]] +; DEBUGLOC-NEXT: [[TMP0:%.*]] = add i64 [[END1]], -8, !dbg [[DBG34]] +; DEBUGLOC-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]], !dbg [[DBG34]] +; DEBUGLOC-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3, !dbg [[DBG34]] +; DEBUGLOC-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1, !dbg [[DBG34]] +; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 4, !dbg [[DBG34]] +; DEBUGLOC-NEXT: 
br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]], !dbg [[DBG34]] ; DEBUGLOC: [[VECTOR_PH]]: ; DEBUGLOC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 4 ; DEBUGLOC-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]] ; DEBUGLOC-NEXT: [[TMP4:%.*]] = mul i64 [[N_VEC]], 8 ; DEBUGLOC-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP4]] -; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG38]] +; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG34]] ; DEBUGLOC: [[VECTOR_BODY]]: ; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; DEBUGLOC-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], %[[VECTOR_PH]] ], [ [[PTR_IND:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG39:![0-9]+]] -; DEBUGLOC-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> <i64 0, i64 8, i64 16, i64 24>, !dbg [[DBG39]] -; DEBUGLOC-NEXT: [[TMP6:%.*]] = extractelement <4 x ptr> [[VECTOR_GEP]], i32 0, !dbg [[DBG40:![0-9]+]] -; DEBUGLOC-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP6]], align 1, !dbg [[DBG40]] +; DEBUGLOC-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], %[[VECTOR_PH]] ], [ [[PTR_IND:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG35:![0-9]+]] +; DEBUGLOC-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> <i64 0, i64 8, i64 16, i64 24>, !dbg [[DBG35]] +; DEBUGLOC-NEXT: [[TMP6:%.*]] = extractelement <4 x ptr> [[VECTOR_GEP]], i32 0, !dbg [[DBG36:![0-9]+]] +; DEBUGLOC-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP6]], align 1, !dbg [[DBG36]] ; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; DEBUGLOC-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 32, !dbg [[DBG39]] -; DEBUGLOC-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG41:![0-9]+]] -; DEBUGLOC-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG41]], !llvm.loop [[LOOP42:![0-9]+]] +; DEBUGLOC-NEXT: [[PTR_IND]] = getelementptr i8, 
ptr [[POINTER_PHI]], i64 32, !dbg [[DBG35]] +; DEBUGLOC-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG37:![0-9]+]] +; DEBUGLOC-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG37]], !llvm.loop [[LOOP38:![0-9]+]] ; DEBUGLOC: [[MIDDLE_BLOCK]]: -; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]], !dbg [[DBG41]] -; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG41]] +; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]], !dbg [[DBG37]] +; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG37]] ; DEBUGLOC: [[SCALAR_PH]]: -; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP5]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ], !dbg [[DBG39]] -; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG38]] +; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP5]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ], !dbg [[DBG35]] +; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG34]] ; DEBUGLOC: [[LOOP]]: -; DEBUGLOC-NEXT: [[IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG39]] -; DEBUGLOC-NEXT: #dbg_value(ptr [[IV]], [[META35:![0-9]+]], !DIExpression(), [[DBG39]]) -; DEBUGLOC-NEXT: [[IV_NEXT]] = getelementptr inbounds ptr, ptr [[IV]], i64 1, !dbg [[DBG43:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(ptr [[IV_NEXT]], [[META36:![0-9]+]], !DIExpression(), [[DBG43]]) -; DEBUGLOC-NEXT: store ptr [[IV]], ptr [[IV]], align 1, !dbg [[DBG40]] -; DEBUGLOC-NEXT: [[CMP_NOT:%.*]] = icmp eq ptr [[IV_NEXT]], [[END]], !dbg [[DBG44:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i1 [[CMP_NOT]], [[META37:![0-9]+]], !DIExpression(), [[DBG44]]) -; DEBUGLOC-NEXT: br i1 [[CMP_NOT]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG41]], !llvm.loop [[LOOP45:![0-9]+]] +; DEBUGLOC-NEXT: [[IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG35]] +; DEBUGLOC-NEXT: 
#dbg_value(ptr [[IV]], [[META31:![0-9]+]], !DIExpression(), [[DBG35]]) +; DEBUGLOC-NEXT: [[IV_NEXT]] = getelementptr inbounds ptr, ptr [[IV]], i64 1, !dbg [[DBG39:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(ptr [[IV_NEXT]], [[META32:![0-9]+]], !DIExpression(), [[DBG39]]) +; DEBUGLOC-NEXT: store ptr [[IV]], ptr [[IV]], align 1, !dbg [[DBG36]] +; DEBUGLOC-NEXT: [[CMP_NOT:%.*]] = icmp eq ptr [[IV_NEXT]], [[END]], !dbg [[DBG40:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(i1 [[CMP_NOT]], [[META33:![0-9]+]], !DIExpression(), [[DBG40]]) +; DEBUGLOC-NEXT: br i1 [[CMP_NOT]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG37]], !llvm.loop [[LOOP41:![0-9]+]] ; DEBUGLOC: [[EXIT]]: -; DEBUGLOC-NEXT: ret void, !dbg [[DBG46:![0-9]+]] +; DEBUGLOC-NEXT: ret void, !dbg [[DBG42:![0-9]+]] ; entry: br label %loop @@ -254,7 +226,7 @@ define void @predicated_phi_dbg(i64 %n, ptr %x) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] @@ -274,96 +246,96 @@ define void @predicated_phi_dbg(i64 %n, ptr %x) { ; CHECK-NEXT: store i64 [[D]], ptr [[IDX]], align 8 ; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 ; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[COND]], label %[[FOR_BODY]], label %[[FOR_END]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[COND]], label %[[FOR_BODY]], label %[[FOR_END]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret void ; ; DEBUGLOC-LABEL: define void @predicated_phi_dbg( -; 
DEBUGLOC-SAME: i64 [[N:%.*]], ptr [[X:%.*]]) !dbg [[DBG47:![0-9]+]] { +; DEBUGLOC-SAME: i64 [[N:%.*]], ptr [[X:%.*]]) !dbg [[DBG43:![0-9]+]] { ; DEBUGLOC-NEXT: [[ENTRY:.*]]: -; DEBUGLOC-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1), !dbg [[DBG56:![0-9]+]] -; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4, !dbg [[DBG56]] -; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]], !dbg [[DBG56]] +; DEBUGLOC-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1), !dbg [[DBG52:![0-9]+]] +; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4, !dbg [[DBG52]] +; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]], !dbg [[DBG52]] ; DEBUGLOC: [[VECTOR_PH]]: ; DEBUGLOC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 ; DEBUGLOC-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] -; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG56]] +; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG52]] ; DEBUGLOC: [[VECTOR_BODY]]: -; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_UDIV_CONTINUE6:.*]] ], !dbg [[DBG57:![0-9]+]] -; DEBUGLOC-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_UDIV_CONTINUE6]] ], !dbg [[DBG57]] -; DEBUGLOC-NEXT: [[TMP0:%.*]] = icmp ult <4 x i64> [[VEC_IND]], splat (i64 5), !dbg [[DBG58:![0-9]+]] -; DEBUGLOC-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0, !dbg [[DBG58]] -; DEBUGLOC-NEXT: br i1 [[TMP1]], label %[[PRED_UDIV_IF:.*]], label %[[PRED_UDIV_CONTINUE:.*]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_UDIV_CONTINUE6:.*]] ], !dbg [[DBG53:![0-9]+]] +; DEBUGLOC-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_UDIV_CONTINUE6]] ], !dbg [[DBG53]] +; 
DEBUGLOC-NEXT: [[TMP0:%.*]] = icmp ult <4 x i64> [[VEC_IND]], splat (i64 5), !dbg [[DBG54:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0, !dbg [[DBG54]] +; DEBUGLOC-NEXT: br i1 [[TMP1]], label %[[PRED_UDIV_IF:.*]], label %[[PRED_UDIV_CONTINUE:.*]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_IF]]: -; DEBUGLOC-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0, !dbg [[DBG57]] -; DEBUGLOC-NEXT: [[TMP3:%.*]] = udiv i64 [[N]], [[TMP2]], !dbg [[DBG59:![0-9]+]] -; DEBUGLOC-NEXT: [[TMP4:%.*]] = insertelement <4 x i64> poison, i64 [[TMP3]], i32 0, !dbg [[DBG59]] -; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0, !dbg [[DBG53]] +; DEBUGLOC-NEXT: [[TMP3:%.*]] = udiv i64 [[N]], [[TMP2]], !dbg [[DBG55:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP4:%.*]] = insertelement <4 x i64> poison, i64 [[TMP3]], i32 0, !dbg [[DBG55]] +; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_CONTINUE]]: -; DEBUGLOC-NEXT: [[TMP5:%.*]] = phi <4 x i64> [ poison, %[[VECTOR_BODY]] ], [ [[TMP4]], %[[PRED_UDIV_IF]] ], !dbg [[DBG59]] -; DEBUGLOC-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i32 1, !dbg [[DBG58]] -; DEBUGLOC-NEXT: br i1 [[TMP6]], label %[[PRED_UDIV_IF1:.*]], label %[[PRED_UDIV_CONTINUE2:.*]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[TMP5:%.*]] = phi <4 x i64> [ poison, %[[VECTOR_BODY]] ], [ [[TMP4]], %[[PRED_UDIV_IF]] ], !dbg [[DBG55]] +; DEBUGLOC-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i32 1, !dbg [[DBG54]] +; DEBUGLOC-NEXT: br i1 [[TMP6]], label %[[PRED_UDIV_IF1:.*]], label %[[PRED_UDIV_CONTINUE2:.*]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_IF1]]: -; DEBUGLOC-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 1, !dbg [[DBG57]] -; DEBUGLOC-NEXT: [[TMP8:%.*]] = udiv i64 [[N]], [[TMP7]], !dbg [[DBG59]] -; DEBUGLOC-NEXT: [[TMP9:%.*]] = insertelement <4 x i64> [[TMP5]], i64 [[TMP8]], i32 1, !dbg [[DBG59]] -; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE2]], !dbg 
[[DBG58]] +; DEBUGLOC-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 1, !dbg [[DBG53]] +; DEBUGLOC-NEXT: [[TMP8:%.*]] = udiv i64 [[N]], [[TMP7]], !dbg [[DBG55]] +; DEBUGLOC-NEXT: [[TMP9:%.*]] = insertelement <4 x i64> [[TMP5]], i64 [[TMP8]], i32 1, !dbg [[DBG55]] +; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE2]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_CONTINUE2]]: -; DEBUGLOC-NEXT: [[TMP10:%.*]] = phi <4 x i64> [ [[TMP5]], %[[PRED_UDIV_CONTINUE]] ], [ [[TMP9]], %[[PRED_UDIV_IF1]] ], !dbg [[DBG59]] -; DEBUGLOC-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2, !dbg [[DBG58]] -; DEBUGLOC-NEXT: br i1 [[TMP11]], label %[[PRED_UDIV_IF3:.*]], label %[[PRED_UDIV_CONTINUE4:.*]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[TMP10:%.*]] = phi <4 x i64> [ [[TMP5]], %[[PRED_UDIV_CONTINUE]] ], [ [[TMP9]], %[[PRED_UDIV_IF1]] ], !dbg [[DBG55]] +; DEBUGLOC-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2, !dbg [[DBG54]] +; DEBUGLOC-NEXT: br i1 [[TMP11]], label %[[PRED_UDIV_IF3:.*]], label %[[PRED_UDIV_CONTINUE4:.*]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_IF3]]: -; DEBUGLOC-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 2, !dbg [[DBG57]] -; DEBUGLOC-NEXT: [[TMP13:%.*]] = udiv i64 [[N]], [[TMP12]], !dbg [[DBG59]] -; DEBUGLOC-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP10]], i64 [[TMP13]], i32 2, !dbg [[DBG59]] -; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE4]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 2, !dbg [[DBG53]] +; DEBUGLOC-NEXT: [[TMP13:%.*]] = udiv i64 [[N]], [[TMP12]], !dbg [[DBG55]] +; DEBUGLOC-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP10]], i64 [[TMP13]], i32 2, !dbg [[DBG55]] +; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE4]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_CONTINUE4]]: -; DEBUGLOC-NEXT: [[TMP15:%.*]] = phi <4 x i64> [ [[TMP10]], %[[PRED_UDIV_CONTINUE2]] ], [ [[TMP14]], %[[PRED_UDIV_IF3]] ], !dbg [[DBG59]] -; DEBUGLOC-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3, !dbg [[DBG58]] -; DEBUGLOC-NEXT: 
br i1 [[TMP16]], label %[[PRED_UDIV_IF5:.*]], label %[[PRED_UDIV_CONTINUE6]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[TMP15:%.*]] = phi <4 x i64> [ [[TMP10]], %[[PRED_UDIV_CONTINUE2]] ], [ [[TMP14]], %[[PRED_UDIV_IF3]] ], !dbg [[DBG55]] +; DEBUGLOC-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3, !dbg [[DBG54]] +; DEBUGLOC-NEXT: br i1 [[TMP16]], label %[[PRED_UDIV_IF5:.*]], label %[[PRED_UDIV_CONTINUE6]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_IF5]]: -; DEBUGLOC-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 3, !dbg [[DBG57]] -; DEBUGLOC-NEXT: [[TMP18:%.*]] = udiv i64 [[N]], [[TMP17]], !dbg [[DBG59]] -; DEBUGLOC-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP18]], i32 3, !dbg [[DBG59]] -; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE6]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 3, !dbg [[DBG53]] +; DEBUGLOC-NEXT: [[TMP18:%.*]] = udiv i64 [[N]], [[TMP17]], !dbg [[DBG55]] +; DEBUGLOC-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP18]], i32 3, !dbg [[DBG55]] +; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE6]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_CONTINUE6]]: -; DEBUGLOC-NEXT: [[TMP20:%.*]] = phi <4 x i64> [ [[TMP15]], %[[PRED_UDIV_CONTINUE4]] ], [ [[TMP19]], %[[PRED_UDIV_IF5]] ], !dbg [[DBG59]] -; DEBUGLOC-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP20]], <4 x i64> zeroinitializer, !dbg [[DBG60:![0-9]+]] -; DEBUGLOC-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[X]], i64 [[INDEX]], !dbg [[DBG61:![0-9]+]] -; DEBUGLOC-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP21]], align 8, !dbg [[DBG62:![0-9]+]] -; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG57]] -; DEBUGLOC-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4), !dbg [[DBG57]] -; DEBUGLOC-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG63:![0-9]+]] -; DEBUGLOC-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG63]], !llvm.loop 
[[LOOP64:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP20:%.*]] = phi <4 x i64> [ [[TMP15]], %[[PRED_UDIV_CONTINUE4]] ], [ [[TMP19]], %[[PRED_UDIV_IF5]] ], !dbg [[DBG55]] +; DEBUGLOC-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP20]], <4 x i64> zeroinitializer, !dbg [[DBG56:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[X]], i64 [[INDEX]], !dbg [[DBG57:![0-9]+]] +; DEBUGLOC-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP21]], align 8, !dbg [[DBG58:![0-9]+]] +; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG53]] +; DEBUGLOC-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4), !dbg [[DBG53]] +; DEBUGLOC-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG59:![0-9]+]] +; DEBUGLOC-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG59]], !llvm.loop [[LOOP60:![0-9]+]] ; DEBUGLOC: [[MIDDLE_BLOCK]]: -; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]], !dbg [[DBG63]] -; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]], !dbg [[DBG63]] +; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]], !dbg [[DBG59]] +; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]], !dbg [[DBG59]] ; DEBUGLOC: [[SCALAR_PH]]: -; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], !dbg [[DBG57]] -; DEBUGLOC-NEXT: br label %[[FOR_BODY:.*]], !dbg [[DBG56]] +; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], !dbg [[DBG53]] +; DEBUGLOC-NEXT: br label %[[FOR_BODY:.*]], !dbg [[DBG52]] ; DEBUGLOC: [[FOR_BODY]]: -; DEBUGLOC-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[FOR_INC:.*]] ], !dbg [[DBG57]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[I]], [[META49:![0-9]+]], !DIExpression(), [[DBG57]]) -; DEBUGLOC-NEXT: [[CMP:%.*]] = icmp ult i64 [[I]], 5, !dbg [[DBG58]] -; DEBUGLOC-NEXT: #dbg_value(i1 
[[CMP]], [[META50:![0-9]+]], !DIExpression(), [[DBG58]]) -; DEBUGLOC-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[FOR_INC]], !dbg [[DBG65:![0-9]+]] +; DEBUGLOC-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[FOR_INC:.*]] ], !dbg [[DBG53]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[I]], [[META45:![0-9]+]], !DIExpression(), [[DBG53]]) +; DEBUGLOC-NEXT: [[CMP:%.*]] = icmp ult i64 [[I]], 5, !dbg [[DBG54]] +; DEBUGLOC-NEXT: #dbg_value(i1 [[CMP]], [[META46:![0-9]+]], !DIExpression(), [[DBG54]]) +; DEBUGLOC-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[FOR_INC]], !dbg [[DBG61:![0-9]+]] ; DEBUGLOC: [[IF_THEN]]: -; DEBUGLOC-NEXT: [[TMP4:%.*]] = udiv i64 [[N]], [[I]], !dbg [[DBG59]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[TMP4]], [[META51:![0-9]+]], !DIExpression(), [[DBG59]]) -; DEBUGLOC-NEXT: br label %[[FOR_INC]], !dbg [[DBG66:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP4:%.*]] = udiv i64 [[N]], [[I]], !dbg [[DBG55]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[TMP4]], [[META47:![0-9]+]], !DIExpression(), [[DBG55]]) +; DEBUGLOC-NEXT: br label %[[FOR_INC]], !dbg [[DBG62:![0-9]+]] ; DEBUGLOC: [[FOR_INC]]: -; DEBUGLOC-NEXT: [[D:%.*]] = phi i64 [ 0, %[[FOR_BODY]] ], [ [[TMP4]], %[[IF_THEN]] ], !dbg [[DBG60]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[D]], [[META52:![0-9]+]], !DIExpression(), [[DBG60]]) -; DEBUGLOC-NEXT: [[IDX:%.*]] = getelementptr i64, ptr [[X]], i64 [[I]], !dbg [[DBG61]] -; DEBUGLOC-NEXT: #dbg_value(ptr [[IDX]], [[META53:![0-9]+]], !DIExpression(), [[DBG61]]) -; DEBUGLOC-NEXT: store i64 [[D]], ptr [[IDX]], align 8, !dbg [[DBG62]] -; DEBUGLOC-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1, !dbg [[DBG67:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[I_NEXT]], [[META54:![0-9]+]], !DIExpression(), [[DBG67]]) -; DEBUGLOC-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]], !dbg [[DBG68:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i1 [[COND]], [[META55:![0-9]+]], !DIExpression(), [[DBG68]]) -; DEBUGLOC-NEXT: br i1 [[COND]], label %[[FOR_BODY]], label %[[FOR_END]], 
!dbg [[DBG63]], !llvm.loop [[LOOP69:![0-9]+]] +; DEBUGLOC-NEXT: [[D:%.*]] = phi i64 [ 0, %[[FOR_BODY]] ], [ [[TMP4]], %[[IF_THEN]] ], !dbg [[DBG56]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[D]], [[META48:![0-9]+]], !DIExpression(), [[DBG56]]) +; DEBUGLOC-NEXT: [[IDX:%.*]] = getelementptr i64, ptr [[X]], i64 [[I]], !dbg [[DBG57]] +; DEBUGLOC-NEXT: #dbg_value(ptr [[IDX]], [[META49:![0-9]+]], !DIExpression(), [[DBG57]]) +; DEBUGLOC-NEXT: store i64 [[D]], ptr [[IDX]], align 8, !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1, !dbg [[DBG63:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[I_NEXT]], [[META50:![0-9]+]], !DIExpression(), [[DBG63]]) +; DEBUGLOC-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]], !dbg [[DBG64:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(i1 [[COND]], [[META51:![0-9]+]], !DIExpression(), [[DBG64]]) +; DEBUGLOC-NEXT: br i1 [[COND]], label %[[FOR_BODY]], label %[[FOR_END]], !dbg [[DBG59]], !llvm.loop [[LOOP65:![0-9]+]] ; DEBUGLOC: [[FOR_END]]: -; DEBUGLOC-NEXT: ret void, !dbg [[DBG70:![0-9]+]] +; DEBUGLOC-NEXT: ret void, !dbg [[DBG66:![0-9]+]] ; entry: br label %for.body @@ -415,7 +387,7 @@ define void @scalar_cast_dbg(ptr nocapture %a, i32 %start, i64 %k) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4) ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -429,57 +401,57 @@ define void @scalar_cast_dbg(ptr nocapture %a, i32 %start, i64 %k) { ; CHECK-NEXT: store i32 [[TRUNC_IV]], ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; 
CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[K]] -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; ; DEBUGLOC-LABEL: define void @scalar_cast_dbg( -; DEBUGLOC-SAME: ptr captures(none) [[A:%.*]], i32 [[START:%.*]], i64 [[K:%.*]]) !dbg [[DBG71:![0-9]+]] { +; DEBUGLOC-SAME: ptr captures(none) [[A:%.*]], i32 [[START:%.*]], i64 [[K:%.*]]) !dbg [[DBG67:![0-9]+]] { ; DEBUGLOC-NEXT: [[ENTRY:.*]]: -; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[K]], 4, !dbg [[DBG78:![0-9]+]] -; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]], !dbg [[DBG78]] +; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[K]], 4, !dbg [[DBG74:![0-9]+]] +; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]], !dbg [[DBG74]] ; DEBUGLOC: [[VECTOR_SCEVCHECK]]: -; DEBUGLOC-NEXT: [[TMP0:%.*]] = add i64 [[K]], -1, !dbg [[DBG78]] -; DEBUGLOC-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32, !dbg [[DBG78]] -; DEBUGLOC-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0, !dbg [[DBG78]] -; DEBUGLOC-NEXT: [[TMP3:%.*]] = icmp ugt i64 [[TMP0]], 4294967295, !dbg [[DBG78]] -; DEBUGLOC-NEXT: [[TMP4:%.*]] = or i1 [[TMP2]], [[TMP3]], !dbg [[DBG78]] -; DEBUGLOC-NEXT: br i1 [[TMP4]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]], !dbg [[DBG79:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP0:%.*]] = add i64 [[K]], -1, !dbg [[DBG74]] +; DEBUGLOC-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32, !dbg [[DBG74]] +; DEBUGLOC-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0, !dbg [[DBG74]] +; DEBUGLOC-NEXT: [[TMP3:%.*]] = icmp ugt i64 [[TMP0]], 4294967295, !dbg [[DBG74]] +; DEBUGLOC-NEXT: [[TMP4:%.*]] = or i1 [[TMP2]], [[TMP3]], !dbg [[DBG74]] +; DEBUGLOC-NEXT: br i1 [[TMP4]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]], !dbg [[DBG75:![0-9]+]] ; 
DEBUGLOC: [[VECTOR_PH]]: ; DEBUGLOC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[K]], 4 ; DEBUGLOC-NEXT: [[N_VEC:%.*]] = sub i64 [[K]], [[N_MOD_VF]] -; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG79]] +; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG75]] ; DEBUGLOC: [[VECTOR_BODY]]: -; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG79]] -; DEBUGLOC-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG80:![0-9]+]] -; DEBUGLOC-NEXT: [[TMP5:%.*]] = trunc i64 [[INDEX]] to i32, !dbg [[DBG80]] -; DEBUGLOC-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP5]], !dbg [[DBG81:![0-9]+]] -; DEBUGLOC-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP6]], align 4, !dbg [[DBG82:![0-9]+]] -; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG79]] -; DEBUGLOC-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4), !dbg [[DBG80]] -; DEBUGLOC-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG83:![0-9]+]] -; DEBUGLOC-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG83]], !llvm.loop [[LOOP84:![0-9]+]] +; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG75]] +; DEBUGLOC-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG76:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP5:%.*]] = trunc i64 [[INDEX]] to i32, !dbg [[DBG76]] +; DEBUGLOC-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP5]], !dbg [[DBG77:![0-9]+]] +; DEBUGLOC-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP6]], align 4, !dbg [[DBG78:![0-9]+]] +; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG75]] +; DEBUGLOC-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4), !dbg [[DBG76]] +; 
DEBUGLOC-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG79:![0-9]+]] +; DEBUGLOC-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG79]], !llvm.loop [[LOOP80:![0-9]+]] ; DEBUGLOC: [[MIDDLE_BLOCK]]: -; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]], !dbg [[DBG83]] -; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG83]] +; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]], !dbg [[DBG79]] +; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG79]] ; DEBUGLOC: [[SCALAR_PH]]: -; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], !dbg [[DBG79]] -; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG78]] +; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], !dbg [[DBG75]] +; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG74]] ; DEBUGLOC: [[LOOP]]: -; DEBUGLOC-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG79]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[IV]], [[META73:![0-9]+]], !DIExpression(), [[DBG79]]) -; DEBUGLOC-NEXT: [[TRUNC_IV:%.*]] = trunc i64 [[IV]] to i32, !dbg [[DBG80]] -; DEBUGLOC-NEXT: #dbg_value(i32 [[TRUNC_IV]], [[META74:![0-9]+]], !DIExpression(), [[DBG80]]) -; DEBUGLOC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TRUNC_IV]], !dbg [[DBG81]] -; DEBUGLOC-NEXT: #dbg_value(ptr [[ARRAYIDX]], [[META75:![0-9]+]], !DIExpression(), [[DBG81]]) -; DEBUGLOC-NEXT: store i32 [[TRUNC_IV]], ptr [[ARRAYIDX]], align 4, !dbg [[DBG82]] -; DEBUGLOC-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1, !dbg [[DBG85:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[IV_NEXT]], [[META76:![0-9]+]], !DIExpression(), [[DBG85]]) -; DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[K]], !dbg [[DBG86:![0-9]+]] -; 
DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META77:![0-9]+]], !DIExpression(), [[DBG86]]) -; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG83]], !llvm.loop [[LOOP87:![0-9]+]] +; DEBUGLOC-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG75]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[IV]], [[META69:![0-9]+]], !DIExpression(), [[DBG75]]) +; DEBUGLOC-NEXT: [[TRUNC_IV:%.*]] = trunc i64 [[IV]] to i32, !dbg [[DBG76]] +; DEBUGLOC-NEXT: #dbg_value(i32 [[TRUNC_IV]], [[META70:![0-9]+]], !DIExpression(), [[DBG76]]) +; DEBUGLOC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TRUNC_IV]], !dbg [[DBG77]] +; DEBUGLOC-NEXT: #dbg_value(ptr [[ARRAYIDX]], [[META71:![0-9]+]], !DIExpression(), [[DBG77]]) +; DEBUGLOC-NEXT: store i32 [[TRUNC_IV]], ptr [[ARRAYIDX]], align 4, !dbg [[DBG78]] +; DEBUGLOC-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1, !dbg [[DBG81:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[IV_NEXT]], [[META72:![0-9]+]], !DIExpression(), [[DBG81]]) +; DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[K]], !dbg [[DBG82:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META73:![0-9]+]], !DIExpression(), [[DBG82]]) +; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG79]], !llvm.loop [[LOOP83:![0-9]+]] ; DEBUGLOC: [[EXIT]]: -; DEBUGLOC-NEXT: ret void, !dbg [[DBG88:![0-9]+]] +; DEBUGLOC-NEXT: ret void, !dbg [[DBG84:![0-9]+]] ; entry: br label %loop @@ -522,7 +494,7 @@ define void @widen_intrinsic_dbg(i64 %n, ptr %y, ptr %x) { ; CHECK-NEXT: store <4 x float> [[TMP2]], ptr [[TMP3]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop 
[[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -538,60 +510,60 @@ define void @widen_intrinsic_dbg(i64 %n, ptr %y, ptr %x) { ; CHECK-NEXT: store float [[CALL]], ptr [[GEP_X]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; ; DEBUGLOC-LABEL: define void @widen_intrinsic_dbg( -; DEBUGLOC-SAME: i64 [[N:%.*]], ptr [[Y:%.*]], ptr [[X:%.*]]) !dbg [[DBG89:![0-9]+]] { +; DEBUGLOC-SAME: i64 [[N:%.*]], ptr [[Y:%.*]], ptr [[X:%.*]]) !dbg [[DBG85:![0-9]+]] { ; DEBUGLOC-NEXT: [[ENTRY:.*]]: -; DEBUGLOC-NEXT: [[Y2:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG98:![0-9]+]] -; DEBUGLOC-NEXT: [[X1:%.*]] = ptrtoint ptr [[X]] to i64, !dbg [[DBG98]] -; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4, !dbg [[DBG98]] -; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]], !dbg [[DBG98]] +; DEBUGLOC-NEXT: [[Y2:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG94:![0-9]+]] +; DEBUGLOC-NEXT: [[X1:%.*]] = ptrtoint ptr [[X]] to i64, !dbg [[DBG94]] +; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4, !dbg [[DBG94]] +; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]], !dbg [[DBG94]] ; DEBUGLOC: [[VECTOR_MEMCHECK]]: -; DEBUGLOC-NEXT: [[TMP0:%.*]] = sub i64 [[X1]], [[Y2]], !dbg [[DBG98]] -; DEBUGLOC-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16, !dbg [[DBG98]] -; DEBUGLOC-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]], !dbg [[DBG99:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP0:%.*]] = sub i64 [[X1]], [[Y2]], !dbg [[DBG94]] 
+; DEBUGLOC-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16, !dbg [[DBG94]] +; DEBUGLOC-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]], !dbg [[DBG95:![0-9]+]] ; DEBUGLOC: [[VECTOR_PH]]: ; DEBUGLOC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4 ; DEBUGLOC-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG99]] +; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG95]] ; DEBUGLOC: [[VECTOR_BODY]]: -; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG99]] -; DEBUGLOC-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDEX]], !dbg [[DBG100:![0-9]+]] -; DEBUGLOC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4, !dbg [[DBG101:![0-9]+]] -; DEBUGLOC-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.sqrt.v4f32(<4 x float> [[WIDE_LOAD]]), !dbg [[DBG102:![0-9]+]] -; DEBUGLOC-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDEX]], !dbg [[DBG103:![0-9]+]] -; DEBUGLOC-NEXT: store <4 x float> [[TMP2]], ptr [[TMP3]], align 4, !dbg [[DBG104:![0-9]+]] -; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG99]] -; DEBUGLOC-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG105:![0-9]+]] -; DEBUGLOC-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG105]], !llvm.loop [[LOOP106:![0-9]+]] +; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG95]] +; DEBUGLOC-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDEX]], !dbg [[DBG96:![0-9]+]] +; DEBUGLOC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4, !dbg [[DBG97:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.sqrt.v4f32(<4 x float> [[WIDE_LOAD]]), !dbg [[DBG98:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDEX]], 
!dbg [[DBG99:![0-9]+]] +; DEBUGLOC-NEXT: store <4 x float> [[TMP2]], ptr [[TMP3]], align 4, !dbg [[DBG100:![0-9]+]] +; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG95]] +; DEBUGLOC-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG101:![0-9]+]] +; DEBUGLOC-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG101]], !llvm.loop [[LOOP102:![0-9]+]] ; DEBUGLOC: [[MIDDLE_BLOCK]]: -; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]], !dbg [[DBG105]] -; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG105]] +; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]], !dbg [[DBG101]] +; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG101]] ; DEBUGLOC: [[SCALAR_PH]]: -; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ], !dbg [[DBG99]] -; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG98]] +; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ], !dbg [[DBG95]] +; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG94]] ; DEBUGLOC: [[LOOP]]: -; DEBUGLOC-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG99]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[IV]], [[META91:![0-9]+]], !DIExpression(), [[DBG99]]) -; DEBUGLOC-NEXT: [[GEP_Y:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[IV]], !dbg [[DBG100]] -; DEBUGLOC-NEXT: #dbg_value(ptr [[GEP_Y]], [[META92:![0-9]+]], !DIExpression(), [[DBG100]]) -; DEBUGLOC-NEXT: [[LOAD:%.*]] = load float, ptr [[GEP_Y]], align 4, !dbg [[DBG101]] -; DEBUGLOC-NEXT: #dbg_value(float [[LOAD]], [[META93:![0-9]+]], !DIExpression(), [[DBG101]]) -; DEBUGLOC-NEXT: [[CALL:%.*]] = call float @llvm.sqrt.f32(float [[LOAD]]), !dbg [[DBG102]] -; DEBUGLOC-NEXT: #dbg_value(float [[CALL]], [[META94:![0-9]+]], 
!DIExpression(), [[DBG102]]) -; DEBUGLOC-NEXT: [[GEP_X:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[IV]], !dbg [[DBG103]] -; DEBUGLOC-NEXT: #dbg_value(ptr [[GEP_X]], [[META95:![0-9]+]], !DIExpression(), [[DBG103]]) -; DEBUGLOC-NEXT: store float [[CALL]], ptr [[GEP_X]], align 4, !dbg [[DBG104]] -; DEBUGLOC-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !dbg [[DBG107:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[IV_NEXT]], [[META96:![0-9]+]], !DIExpression(), [[DBG107]]) -; DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]], !dbg [[DBG108:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META97:![0-9]+]], !DIExpression(), [[DBG108]]) -; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG105]], !llvm.loop [[LOOP109:![0-9]+]] +; DEBUGLOC-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG95]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[IV]], [[META87:![0-9]+]], !DIExpression(), [[DBG95]]) +; DEBUGLOC-NEXT: [[GEP_Y:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[IV]], !dbg [[DBG96]] +; DEBUGLOC-NEXT: #dbg_value(ptr [[GEP_Y]], [[META88:![0-9]+]], !DIExpression(), [[DBG96]]) +; DEBUGLOC-NEXT: [[LOAD:%.*]] = load float, ptr [[GEP_Y]], align 4, !dbg [[DBG97]] +; DEBUGLOC-NEXT: #dbg_value(float [[LOAD]], [[META89:![0-9]+]], !DIExpression(), [[DBG97]]) +; DEBUGLOC-NEXT: [[CALL:%.*]] = call float @llvm.sqrt.f32(float [[LOAD]]), !dbg [[DBG98]] +; DEBUGLOC-NEXT: #dbg_value(float [[CALL]], [[META90:![0-9]+]], !DIExpression(), [[DBG98]]) +; DEBUGLOC-NEXT: [[GEP_X:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[IV]], !dbg [[DBG99]] +; DEBUGLOC-NEXT: #dbg_value(ptr [[GEP_X]], [[META91:![0-9]+]], !DIExpression(), [[DBG99]]) +; DEBUGLOC-NEXT: store float [[CALL]], ptr [[GEP_X]], align 4, !dbg [[DBG100]] +; DEBUGLOC-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !dbg [[DBG103:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[IV_NEXT]], [[META92:![0-9]+]], !DIExpression(), [[DBG103]]) +; 
DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]], !dbg [[DBG104:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META93:![0-9]+]], !DIExpression(), [[DBG104]]) +; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG101]], !llvm.loop [[LOOP105:![0-9]+]] ; DEBUGLOC: [[EXIT]]: -; DEBUGLOC-NEXT: ret void, !dbg [[DBG110:![0-9]+]] +; DEBUGLOC-NEXT: ret void, !dbg [[DBG106:![0-9]+]] ; entry: br label %loop @@ -618,23 +590,21 @@ exit: ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} -; CHECK: [[META4]] = !{!"llvm.loop.vectorize.width", i32 4} +; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} +; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META2]], [[META1]]} ; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} ; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]} ; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} -; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]} +; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]]} ; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]} ; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]]} -; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]} -; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]]} ;. 
; DEBUGLOC: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C, file: [[META1:![0-9]+]], producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug) ; DEBUGLOC: [[META1]] = !DIFile(filename: "{{.*}}<stdin>", directory: {{.*}}) ; DEBUGLOC: [[DBG5]] = distinct !DISubprogram(name: "_Z3fooPf", linkageName: "_Z3fooPf", scope: null, file: [[META1]], line: 1, type: [[META6:![0-9]+]], scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META8:![0-9]+]]) ; DEBUGLOC: [[META6]] = !DISubroutineType(types: [[META7:![0-9]+]]) ; DEBUGLOC: [[META7]] = !{} -; DEBUGLOC: [[META8]] = !{[[META9]], [[META11]], [[META12]], [[META14]], [[META15]], [[META16]]} +; DEBUGLOC: [[META8]] = !{[[META9:![0-9]+]], [[META11:![0-9]+]], [[META12:![0-9]+]], [[META14:![0-9]+]], [[META15:![0-9]+]], [[META16:![0-9]+]]} ; DEBUGLOC: [[META9]] = !DILocalVariable(name: "1", scope: [[DBG5]], file: [[META1]], line: 2, type: [[META10:![0-9]+]]) ; DEBUGLOC: [[META10]] = !DIBasicType(name: "ty64", size: 64, encoding: DW_ATE_unsigned) ; DEBUGLOC: [[META11]] = !DILocalVariable(name: "2", scope: [[DBG5]], file: [[META1]], line: 3, type: [[META10]]) @@ -654,87 +624,83 @@ exit: ; DEBUGLOC: [[LOOP25]] = distinct !{[[LOOP25]], [[META26:![0-9]+]], [[META27:![0-9]+]]} ; DEBUGLOC: [[META26]] = !{!"llvm.loop.isvectorized", i32 1} ; DEBUGLOC: [[META27]] = !{!"llvm.loop.unroll.runtime.disable"} -; DEBUGLOC: [[DBG28]] = !DILocation(line: 7, column: 1, scope: [[DBG5]]) -; DEBUGLOC: [[DBG29]] = !DILocation(line: 8, column: 1, scope: [[DBG5]]) -; DEBUGLOC: [[LOOP30]] = distinct !{[[LOOP30]], [[META31:![0-9]+]]} -; DEBUGLOC: [[META31]] = !{!"llvm.loop.vectorize.width", i32 4} -; DEBUGLOC: [[DBG32]] = !DILocation(line: 10, column: 1, scope: [[DBG5]]) -; DEBUGLOC: [[DBG33]] = distinct !DISubprogram(name: "widen_ptr_induction_dbg", linkageName: "widen_ptr_induction_dbg", scope: null, file: [[META1]], line: 11, type: [[META6]], scopeLine: 11, spFlags: 
DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META34:![0-9]+]]) -; DEBUGLOC: [[META34]] = !{[[META35]], [[META36]], [[META37]]} -; DEBUGLOC: [[META35]] = !DILocalVariable(name: "7", scope: [[DBG33]], file: [[META1]], line: 12, type: [[META10]]) -; DEBUGLOC: [[META36]] = !DILocalVariable(name: "8", scope: [[DBG33]], file: [[META1]], line: 13, type: [[META10]]) -; DEBUGLOC: [[META37]] = !DILocalVariable(name: "9", scope: [[DBG33]], file: [[META1]], line: 15, type: [[META17]]) -; DEBUGLOC: [[DBG38]] = !DILocation(line: 11, column: 1, scope: [[DBG33]]) -; DEBUGLOC: [[DBG39]] = !DILocation(line: 12, column: 1, scope: [[DBG33]]) -; DEBUGLOC: [[DBG40]] = !DILocation(line: 14, column: 1, scope: [[DBG33]]) -; DEBUGLOC: [[DBG41]] = !DILocation(line: 16, column: 1, scope: [[DBG33]]) -; DEBUGLOC: [[LOOP42]] = distinct !{[[LOOP42]], [[META26]], [[META27]]} -; DEBUGLOC: [[DBG43]] = !DILocation(line: 13, column: 1, scope: [[DBG33]]) -; DEBUGLOC: [[DBG44]] = !DILocation(line: 15, column: 1, scope: [[DBG33]]) -; DEBUGLOC: [[LOOP45]] = distinct !{[[LOOP45]], [[META27]], [[META26]]} -; DEBUGLOC: [[DBG46]] = !DILocation(line: 17, column: 1, scope: [[DBG33]]) -; DEBUGLOC: [[DBG47]] = distinct !DISubprogram(name: "predicated_phi_dbg", linkageName: "predicated_phi_dbg", scope: null, file: [[META1]], line: 18, type: [[META6]], scopeLine: 18, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META48:![0-9]+]]) -; DEBUGLOC: [[META48]] = !{[[META49]], [[META50]], [[META51]], [[META52]], [[META53]], [[META54]], [[META55]]} -; DEBUGLOC: [[META49]] = !DILocalVariable(name: "10", scope: [[DBG47]], file: [[META1]], line: 19, type: [[META10]]) -; DEBUGLOC: [[META50]] = !DILocalVariable(name: "11", scope: [[DBG47]], file: [[META1]], line: 20, type: [[META17]]) -; DEBUGLOC: [[META51]] = !DILocalVariable(name: "12", scope: [[DBG47]], file: [[META1]], line: 22, type: [[META10]]) -; DEBUGLOC: [[META52]] = !DILocalVariable(name: "13", scope: 
[[DBG47]], file: [[META1]], line: 24, type: [[META10]]) -; DEBUGLOC: [[META53]] = !DILocalVariable(name: "14", scope: [[DBG47]], file: [[META1]], line: 25, type: [[META10]]) -; DEBUGLOC: [[META54]] = !DILocalVariable(name: "15", scope: [[DBG47]], file: [[META1]], line: 27, type: [[META10]]) -; DEBUGLOC: [[META55]] = !DILocalVariable(name: "16", scope: [[DBG47]], file: [[META1]], line: 28, type: [[META17]]) -; DEBUGLOC: [[DBG56]] = !DILocation(line: 18, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG57]] = !DILocation(line: 19, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG58]] = !DILocation(line: 20, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG59]] = !DILocation(line: 22, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG60]] = !DILocation(line: 24, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG61]] = !DILocation(line: 25, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG62]] = !DILocation(line: 26, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG63]] = !DILocation(line: 29, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[LOOP64]] = distinct !{[[LOOP64]], [[META26]], [[META27]]} -; DEBUGLOC: [[DBG65]] = !DILocation(line: 21, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG66]] = !DILocation(line: 23, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG67]] = !DILocation(line: 27, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG68]] = !DILocation(line: 28, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[LOOP69]] = distinct !{[[LOOP69]], [[META27]], [[META26]]} -; DEBUGLOC: [[DBG70]] = !DILocation(line: 30, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG71]] = distinct !DISubprogram(name: "scalar_cast_dbg", linkageName: "scalar_cast_dbg", scope: null, file: [[META1]], line: 31, type: [[META6]], scopeLine: 31, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META72:![0-9]+]]) -; DEBUGLOC: [[META72]] = !{[[META73]], [[META74]], [[META75]], [[META76]], [[META77]]} -; DEBUGLOC: [[META73]] = !DILocalVariable(name: "17", scope: [[DBG71]], file: [[META1]], 
line: 32, type: [[META10]]) -; DEBUGLOC: [[META74]] = !DILocalVariable(name: "18", scope: [[DBG71]], file: [[META1]], line: 33, type: [[META13]]) -; DEBUGLOC: [[META75]] = !DILocalVariable(name: "19", scope: [[DBG71]], file: [[META1]], line: 34, type: [[META10]]) -; DEBUGLOC: [[META76]] = !DILocalVariable(name: "20", scope: [[DBG71]], file: [[META1]], line: 36, type: [[META10]]) -; DEBUGLOC: [[META77]] = !DILocalVariable(name: "21", scope: [[DBG71]], file: [[META1]], line: 37, type: [[META17]]) -; DEBUGLOC: [[DBG78]] = !DILocation(line: 31, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[DBG79]] = !DILocation(line: 32, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[DBG80]] = !DILocation(line: 33, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[DBG81]] = !DILocation(line: 34, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[DBG82]] = !DILocation(line: 35, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[DBG83]] = !DILocation(line: 38, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[LOOP84]] = distinct !{[[LOOP84]], [[META26]], [[META27]]} -; DEBUGLOC: [[DBG85]] = !DILocation(line: 36, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[DBG86]] = !DILocation(line: 37, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[LOOP87]] = distinct !{[[LOOP87]], [[META26]]} -; DEBUGLOC: [[DBG88]] = !DILocation(line: 39, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[DBG89]] = distinct !DISubprogram(name: "widen_intrinsic_dbg", linkageName: "widen_intrinsic_dbg", scope: null, file: [[META1]], line: 40, type: [[META6]], scopeLine: 40, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META90:![0-9]+]]) -; DEBUGLOC: [[META90]] = !{[[META91]], [[META92]], [[META93]], [[META94]], [[META95]], [[META96]], [[META97]]} -; DEBUGLOC: [[META91]] = !DILocalVariable(name: "22", scope: [[DBG89]], file: [[META1]], line: 41, type: [[META10]]) -; DEBUGLOC: [[META92]] = !DILocalVariable(name: "23", scope: [[DBG89]], file: [[META1]], line: 42, type: [[META10]]) -; DEBUGLOC: [[META93]] = 
!DILocalVariable(name: "24", scope: [[DBG89]], file: [[META1]], line: 43, type: [[META13]]) -; DEBUGLOC: [[META94]] = !DILocalVariable(name: "25", scope: [[DBG89]], file: [[META1]], line: 44, type: [[META13]]) -; DEBUGLOC: [[META95]] = !DILocalVariable(name: "26", scope: [[DBG89]], file: [[META1]], line: 45, type: [[META10]]) -; DEBUGLOC: [[META96]] = !DILocalVariable(name: "27", scope: [[DBG89]], file: [[META1]], line: 47, type: [[META10]]) -; DEBUGLOC: [[META97]] = !DILocalVariable(name: "28", scope: [[DBG89]], file: [[META1]], line: 48, type: [[META17]]) -; DEBUGLOC: [[DBG98]] = !DILocation(line: 40, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG99]] = !DILocation(line: 41, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG100]] = !DILocation(line: 42, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG101]] = !DILocation(line: 43, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG102]] = !DILocation(line: 44, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG103]] = !DILocation(line: 45, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG104]] = !DILocation(line: 46, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG105]] = !DILocation(line: 49, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[LOOP106]] = distinct !{[[LOOP106]], [[META26]], [[META27]]} -; DEBUGLOC: [[DBG107]] = !DILocation(line: 47, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG108]] = !DILocation(line: 48, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[LOOP109]] = distinct !{[[LOOP109]], [[META26]]} -; DEBUGLOC: [[DBG110]] = !DILocation(line: 50, column: 1, scope: [[DBG89]]) +; DEBUGLOC: [[DBG28]] = !DILocation(line: 10, column: 1, scope: [[DBG5]]) +; DEBUGLOC: [[DBG29]] = distinct !DISubprogram(name: "widen_ptr_induction_dbg", linkageName: "widen_ptr_induction_dbg", scope: null, file: [[META1]], line: 11, type: [[META6]], scopeLine: 11, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META30:![0-9]+]]) +; DEBUGLOC: [[META30]] = !{[[META31]], [[META32]], [[META33]]} +; DEBUGLOC: 
[[META31]] = !DILocalVariable(name: "7", scope: [[DBG29]], file: [[META1]], line: 12, type: [[META10]]) +; DEBUGLOC: [[META32]] = !DILocalVariable(name: "8", scope: [[DBG29]], file: [[META1]], line: 13, type: [[META10]]) +; DEBUGLOC: [[META33]] = !DILocalVariable(name: "9", scope: [[DBG29]], file: [[META1]], line: 15, type: [[META17]]) +; DEBUGLOC: [[DBG34]] = !DILocation(line: 11, column: 1, scope: [[DBG29]]) +; DEBUGLOC: [[DBG35]] = !DILocation(line: 12, column: 1, scope: [[DBG29]]) +; DEBUGLOC: [[DBG36]] = !DILocation(line: 14, column: 1, scope: [[DBG29]]) +; DEBUGLOC: [[DBG37]] = !DILocation(line: 16, column: 1, scope: [[DBG29]]) +; DEBUGLOC: [[LOOP38]] = distinct !{[[LOOP38]], [[META26]], [[META27]]} +; DEBUGLOC: [[DBG39]] = !DILocation(line: 13, column: 1, scope: [[DBG29]]) +; DEBUGLOC: [[DBG40]] = !DILocation(line: 15, column: 1, scope: [[DBG29]]) +; DEBUGLOC: [[LOOP41]] = distinct !{[[LOOP41]], [[META27]], [[META26]]} +; DEBUGLOC: [[DBG42]] = !DILocation(line: 17, column: 1, scope: [[DBG29]]) +; DEBUGLOC: [[DBG43]] = distinct !DISubprogram(name: "predicated_phi_dbg", linkageName: "predicated_phi_dbg", scope: null, file: [[META1]], line: 18, type: [[META6]], scopeLine: 18, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META44:![0-9]+]]) +; DEBUGLOC: [[META44]] = !{[[META45]], [[META46]], [[META47]], [[META48]], [[META49]], [[META50]], [[META51]]} +; DEBUGLOC: [[META45]] = !DILocalVariable(name: "10", scope: [[DBG43]], file: [[META1]], line: 19, type: [[META10]]) +; DEBUGLOC: [[META46]] = !DILocalVariable(name: "11", scope: [[DBG43]], file: [[META1]], line: 20, type: [[META17]]) +; DEBUGLOC: [[META47]] = !DILocalVariable(name: "12", scope: [[DBG43]], file: [[META1]], line: 22, type: [[META10]]) +; DEBUGLOC: [[META48]] = !DILocalVariable(name: "13", scope: [[DBG43]], file: [[META1]], line: 24, type: [[META10]]) +; DEBUGLOC: [[META49]] = !DILocalVariable(name: "14", scope: [[DBG43]], file: [[META1]], line: 25, type: 
[[META10]]) +; DEBUGLOC: [[META50]] = !DILocalVariable(name: "15", scope: [[DBG43]], file: [[META1]], line: 27, type: [[META10]]) +; DEBUGLOC: [[META51]] = !DILocalVariable(name: "16", scope: [[DBG43]], file: [[META1]], line: 28, type: [[META17]]) +; DEBUGLOC: [[DBG52]] = !DILocation(line: 18, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG53]] = !DILocation(line: 19, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG54]] = !DILocation(line: 20, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG55]] = !DILocation(line: 22, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG56]] = !DILocation(line: 24, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG57]] = !DILocation(line: 25, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG58]] = !DILocation(line: 26, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG59]] = !DILocation(line: 29, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[LOOP60]] = distinct !{[[LOOP60]], [[META26]], [[META27]]} +; DEBUGLOC: [[DBG61]] = !DILocation(line: 21, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG62]] = !DILocation(line: 23, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG63]] = !DILocation(line: 27, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG64]] = !DILocation(line: 28, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[LOOP65]] = distinct !{[[LOOP65]], [[META27]], [[META26]]} +; DEBUGLOC: [[DBG66]] = !DILocation(line: 30, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG67]] = distinct !DISubprogram(name: "scalar_cast_dbg", linkageName: "scalar_cast_dbg", scope: null, file: [[META1]], line: 31, type: [[META6]], scopeLine: 31, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META68:![0-9]+]]) +; DEBUGLOC: [[META68]] = !{[[META69]], [[META70]], [[META71]], [[META72]], [[META73]]} +; DEBUGLOC: [[META69]] = !DILocalVariable(name: "17", scope: [[DBG67]], file: [[META1]], line: 32, type: [[META10]]) +; DEBUGLOC: [[META70]] = !DILocalVariable(name: "18", scope: [[DBG67]], file: [[META1]], line: 33, type: [[META13]]) +; DEBUGLOC: 
[[META71]] = !DILocalVariable(name: "19", scope: [[DBG67]], file: [[META1]], line: 34, type: [[META10]]) +; DEBUGLOC: [[META72]] = !DILocalVariable(name: "20", scope: [[DBG67]], file: [[META1]], line: 36, type: [[META10]]) +; DEBUGLOC: [[META73]] = !DILocalVariable(name: "21", scope: [[DBG67]], file: [[META1]], line: 37, type: [[META17]]) +; DEBUGLOC: [[DBG74]] = !DILocation(line: 31, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[DBG75]] = !DILocation(line: 32, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[DBG76]] = !DILocation(line: 33, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[DBG77]] = !DILocation(line: 34, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[DBG78]] = !DILocation(line: 35, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[DBG79]] = !DILocation(line: 38, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[LOOP80]] = distinct !{[[LOOP80]], [[META26]], [[META27]]} +; DEBUGLOC: [[DBG81]] = !DILocation(line: 36, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[DBG82]] = !DILocation(line: 37, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[LOOP83]] = distinct !{[[LOOP83]], [[META26]]} +; DEBUGLOC: [[DBG84]] = !DILocation(line: 39, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[DBG85]] = distinct !DISubprogram(name: "widen_intrinsic_dbg", linkageName: "widen_intrinsic_dbg", scope: null, file: [[META1]], line: 40, type: [[META6]], scopeLine: 40, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META86:![0-9]+]]) +; DEBUGLOC: [[META86]] = !{[[META87]], [[META88]], [[META89]], [[META90]], [[META91]], [[META92]], [[META93]]} +; DEBUGLOC: [[META87]] = !DILocalVariable(name: "22", scope: [[DBG85]], file: [[META1]], line: 41, type: [[META10]]) +; DEBUGLOC: [[META88]] = !DILocalVariable(name: "23", scope: [[DBG85]], file: [[META1]], line: 42, type: [[META10]]) +; DEBUGLOC: [[META89]] = !DILocalVariable(name: "24", scope: [[DBG85]], file: [[META1]], line: 43, type: [[META13]]) +; DEBUGLOC: [[META90]] = !DILocalVariable(name: "25", scope: [[DBG85]], file: 
[[META1]], line: 44, type: [[META13]]) +; DEBUGLOC: [[META91]] = !DILocalVariable(name: "26", scope: [[DBG85]], file: [[META1]], line: 45, type: [[META10]]) +; DEBUGLOC: [[META92]] = !DILocalVariable(name: "27", scope: [[DBG85]], file: [[META1]], line: 47, type: [[META10]]) +; DEBUGLOC: [[META93]] = !DILocalVariable(name: "28", scope: [[DBG85]], file: [[META1]], line: 48, type: [[META17]]) +; DEBUGLOC: [[DBG94]] = !DILocation(line: 40, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG95]] = !DILocation(line: 41, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG96]] = !DILocation(line: 42, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG97]] = !DILocation(line: 43, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG98]] = !DILocation(line: 44, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG99]] = !DILocation(line: 45, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG100]] = !DILocation(line: 46, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG101]] = !DILocation(line: 49, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[LOOP102]] = distinct !{[[LOOP102]], [[META26]], [[META27]]} +; DEBUGLOC: [[DBG103]] = !DILocation(line: 47, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG104]] = !DILocation(line: 48, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[LOOP105]] = distinct !{[[LOOP105]], [[META26]]} +; DEBUGLOC: [[DBG106]] = !DILocation(line: 50, column: 1, scope: [[DBG85]]) ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-reduction-inloop.ll index 57f0dc2..787fa31 100644 --- a/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-reduction-inloop.ll +++ b/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-reduction-inloop.ll @@ -22,7 +22,7 @@ loop: %load = load i32, ptr %gep, align 4 %red.next = add i32 %red, %load %iv.next = add i64 %iv, 1 - %exitcond = icmp eq i64 %iv.next, 256 + %exitcond = icmp eq i64 %iv.next, 257 br i1 %exitcond, label %exit, label %loop exit: diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll index f20d492..73ddddc 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll @@ -20,10 +20,6 @@ define i32 @reduction_smin(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[RDX_MINMAX]] ; @@ -66,10 +62,6 @@ define i32 @reduction_smin_select_ops_flipped(ptr nocapture %A, ptr nocapture %B ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[RDX_MINMAX]] ; @@ -111,10 +103,6 @@ define i32 @reduction_smin_intrinsic(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP1]]) ; CHECK-NEXT: ret i32 [[TMP3]] @@ -159,10 +147,6 @@ define i32 @reduction_umax(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[RDX_MINMAX]] ; @@ -205,10 +189,6 @@ define i32 @reduction_umax_select_ops_flipped(ptr nocapture %A, ptr nocapture %B ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[RDX_MINMAX]] ; @@ -250,10 +230,6 @@ define i32 @reduction_umax_intrinsic(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[TMP1]]) ; CHECK-NEXT: ret i32 [[TMP3]] diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll 
b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll index 925290b..1b9dcad 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll @@ -61,11 +61,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP26]] ; @@ -170,11 +166,7 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP49]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP48]] ; @@ -263,11 +255,7 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP29]] ; @@ -373,11 +361,7 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 
; CHECK-NEXT: br i1 [[TMP49]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP48]] ; @@ -485,11 +469,7 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP47:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP47]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP46]] ; @@ -594,11 +574,7 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP46]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP45]] ; @@ -701,11 +677,7 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP46]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 
[[TMP45]] ; @@ -806,11 +778,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[TMP43]] ; @@ -911,11 +879,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[TMP43]] ; @@ -1016,11 +980,7 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret float [[TMP43]] ; @@ -1123,11 +1083,7 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP46]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; 
CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret float [[TMP45]] ; @@ -1211,11 +1167,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[RDX_MINMAX]] ; @@ -1297,11 +1249,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[RDX_MINMAX]] ; @@ -1356,21 +1304,7 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[IF_THEN:%.*]], label [[FOR_INC:%.*]] -; CHECK: if.then: -; CHECK-NEXT: br i1 poison, label [[IF_THEN8:%.*]], label [[IF_ELSE:%.*]] -; CHECK: if.then8: -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: if.else: -; CHECK-NEXT: br i1 poison, label [[IF_THEN16:%.*]], label [[FOR_INC]] -; CHECK: if.then16: -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: br 
i1 poison, label [[FOR_BODY]], label [[FOR_END]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: for.end: ; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[PREDPHI3]]) ; CHECK-NEXT: ret float [[SUM_1_LCSSA]] @@ -1478,11 +1412,7 @@ define i8 @reduction_add_trunc(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[TMP32:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP30]], <4 x i32> [[VEC_PHI]] ; CHECK-NEXT: [[TMP33:%.*]] = trunc <4 x i32> [[TMP32]] to <4 x i8> @@ -1572,11 +1502,7 @@ define i8 @reduction_and_trunc(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[TMP31:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP29]], <4 x i32> [[VEC_PHI]] ; CHECK-NEXT: [[TMP32:%.*]] = trunc <4 x i32> [[TMP31]] to <4 x i8> diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll index cad3ca1..183462f 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll @@ -35,11 +35,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) { ; CHECK-NEXT: 
[[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP7]], [[TMP5]] ; CHECK-NEXT: [[BIN_RDX7:%.*]] = add i32 [[TMP9]], [[BIN_RDX]] @@ -114,11 +110,7 @@ define i64 @reduction_sum_chain(ptr noalias %p, ptr noalias %q) { ; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: [[BIN_RDX:%.*]] = add i64 [[TMP19]], [[TMP17]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add i64 [[TMP21]], [[BIN_RDX]] @@ -345,11 +337,7 @@ define i32 @predicated(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP111:%.*]] = icmp eq i64 [[INDEX_NEXT]], 272 ; CHECK-NEXT: br i1 [[TMP111]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP104]], [[TMP101]] ; CHECK-NEXT: [[BIN_RDX34:%.*]] = add i32 [[TMP107]], [[BIN_RDX]] @@ -581,17 +569,9 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) { ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 16) ; CHECK-NEXT: 
[[TMP119:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP119]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP119]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[IF_THEN:%.*]], label [[FOR_INC:%.*]] -; CHECK: if.then: -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: for.end: ; CHECK-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP112]], [[TMP109]] ; CHECK-NEXT: [[BIN_RDX36:%.*]] = mul i32 [[TMP115]], [[BIN_RDX]] diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll index f4d4cca..ec7fde8 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll @@ -23,21 +23,8 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) { ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L7:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-NEXT: [[L7]] = add i32 [[SUM_02]], [[L3]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 
[[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L7]], %[[DOTLR_PH]] ], [ [[TMP2]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP2]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sum_single( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]]) { @@ -61,22 +48,9 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) { ; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP5]], [[TMP3]] +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP5]], [[TMP3]] ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L7:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L7]] = add i32 [[SUM_02]], [[L3]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; 
CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L7]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]] ; entry: @@ -125,26 +99,8 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L6]] -; CHECK-NEXT: [[L8:%.*]] = add i32 [[L7]], [[L3]] -; CHECK-NEXT: [[L9]] = add i32 [[L8]], [[L5]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP7]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sum( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { @@ -183,27 +139,9 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) { 
; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP15]], [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP15]], [[TMP13]] ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L6]] -; CHECK-INTERLEAVED-NEXT: [[L8:%.*]] = add i32 [[L7]], [[L3]] -; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[L8]], [[L5]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]] ; entry: @@ -251,22 +189,8 @@ define i32 
@reduction_sum_const(ptr noalias nocapture %A) { ; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L3]] -; CHECK-NEXT: [[L9]] = add i32 [[L7]], 3 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP3]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sum_const( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]]) { @@ -294,23 +218,9 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) { ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP7]], [[TMP6]] +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP7]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; 
CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L3]] -; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[L7]], 3 -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]] ; entry: @@ -360,26 +270,8 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[PROD_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 1, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-NEXT: [[L6:%.*]] = trunc 
i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[L7:%.*]] = mul i32 [[PROD_02]], [[L6]] -; CHECK-NEXT: [[L8:%.*]] = mul i32 [[L7]], [[L3]] -; CHECK-NEXT: [[L9]] = mul i32 [[L8]], [[L5]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[PROD_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[PROD_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP7]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_prod( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { @@ -418,27 +310,9 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP15]], [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[PROD_0_LCSSA:%.*]] = mul i32 [[TMP15]], [[TMP13]] ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[PROD_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 1, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] 
-; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = mul i32 [[PROD_02]], [[L6]] -; CHECK-INTERLEAVED-NEXT: [[L8:%.*]] = mul i32 [[L7]], [[L3]] -; CHECK-INTERLEAVED-NEXT: [[L9]] = mul i32 [[L8]], [[L5]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[PROD_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[PROD_0_LCSSA]] ; entry: @@ -491,26 +365,8 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-NEXT: [[L6:%.*]] = mul nsw i32 [[L5]], [[L3]] -; CHECK-NEXT: [[L7:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[L8:%.*]] = add i32 [[SUM_02]], [[L7]] -; CHECK-NEXT: [[L9]] = add i32 [[L8]], [[L6]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] 
= add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP6]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_mix( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { @@ -547,27 +403,9 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP13]], [[TMP10]] +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP13]], [[TMP10]] ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = mul nsw i32 [[L5]], [[L3]] -; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; 
CHECK-INTERLEAVED-NEXT: [[L8:%.*]] = add i32 [[SUM_02]], [[L7]] -; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[L8]], [[L6]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]] ; entry: @@ -617,24 +455,8 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L7:%.*]], %[[DOTLR_PH]] ], [ 19, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-NEXT: [[L6:%.*]] = mul i32 [[SUM_02]], [[L3]] -; CHECK-NEXT: [[L7]] = mul i32 [[L6]], [[L5]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 
[[L7]], %[[DOTLR_PH]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP5]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_mul( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { @@ -666,25 +488,9 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP11]], [[TMP9]] +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = mul i32 [[TMP11]], [[TMP9]] ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L7:%.*]], %[[DOTLR_PH]] ], [ 19, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = mul i32 [[SUM_02]], [[L3]] -; CHECK-INTERLEAVED-NEXT: [[L7]] = mul i32 [[L6]], [[L5]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: 
[[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L7]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]] ; entry: @@ -731,24 +537,8 @@ define i32 @start_at_non_zero(ptr nocapture %in, ptr nocapture %coeff, ptr nocap ; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_09:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ 120, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[COEFF]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[L1]], [[L0]] -; CHECK-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[SUM_09]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP4]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @start_at_non_zero( ; CHECK-INTERLEAVED-SAME: ptr captures(none) [[IN:%.*]], ptr captures(none) [[COEFF:%.*]], ptr captures(none) [[OUT:%.*]]) { @@ -780,24 +570,8 @@ define i32 @start_at_non_zero(ptr nocapture %in, ptr nocapture %coeff, ptr nocap ; CHECK-INTERLEAVED: 
[[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP9]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_09:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ 120, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[COEFF]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[MUL:%.*]] = mul nsw i32 [[L1]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[SUM_09]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]] ; entry: br label %for.body @@ -844,24 +618,8 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] 
= phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[AND:%.*]], %[[FOR_BODY]] ], [ -1, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = and i32 [[RESULT_08]], [[L0]] -; CHECK-NEXT: [[AND]] = and i32 [[ADD]], [[L1]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP5]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_and( ; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) { @@ -893,25 +651,9 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = and i32 [[TMP11]], [[TMP9]] +; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = and i32 [[TMP11]], [[TMP9]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] 
-; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[AND:%.*]], %[[FOR_BODY]] ], [ -1, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = and i32 [[RESULT_08]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[AND]] = and i32 [[ADD]], [[L1]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[RESULT_0_LCSSA]] ; entry: @@ -958,24 +700,8 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[OR:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L1:%.*]] = load i32, 
ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[L1]], [[L0]] -; CHECK-NEXT: [[OR]] = or i32 [[ADD]], [[RESULT_08]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP4]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_or( ; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) { @@ -1005,25 +731,9 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = or i32 [[TMP9]], [[TMP7]] +; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = or i32 [[TMP9]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[OR:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], 
align 4 -; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = add nsw i32 [[L1]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[OR]] = or i32 [[ADD]], [[RESULT_08]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[RESULT_0_LCSSA]] ; entry: @@ -1070,24 +780,8 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[XOR:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[L1]], [[L0]] -; CHECK-NEXT: [[XOR]] = xor i32 [[ADD]], [[RESULT_08]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: 
[[RESULT_0_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP4]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_xor( ; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) { @@ -1117,25 +811,9 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = xor i32 [[TMP9]], [[TMP7]] +; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = xor i32 [[TMP9]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[XOR:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = add nsw i32 [[L1]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[XOR]] = xor i32 [[ADD]], [[RESULT_08]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label 
%[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[RESULT_0_LCSSA]] ; entry: @@ -1183,24 +861,8 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[RESULT_08:%.*]] = phi float [ [[FADD:%.*]], %[[FOR_BODY]] ], [ 0.000000e+00, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[RESULT_08]], [[L0]] -; CHECK-NEXT: [[FADD]] = fadd fast float [[ADD]], [[L1]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ [[FADD]], %[[FOR_BODY]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RESULT_0_LCSSA]] +; CHECK-NEXT: ret float [[TMP3]] ; ; CHECK-INTERLEAVED-LABEL: define float @reduction_fadd( ; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) { @@ -1232,25 +894,9 @@ define float @reduction_fadd(ptr nocapture %A, ptr 
nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fadd fast float [[TMP7]], [[TMP6]] +; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = fadd fast float [[TMP7]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi float [ [[FADD:%.*]], %[[FOR_BODY]] ], [ 0.000000e+00, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = fadd fast float [[RESULT_08]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[FADD]] = fadd fast float [[ADD]], [[L1]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ [[FADD]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret float [[RESULT_0_LCSSA]] ; entry: @@ -1298,24 +944,8 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture 
%B) { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[RESULT_08:%.*]] = phi float [ [[FMUL:%.*]], %[[FOR_BODY]] ], [ 0.000000e+00, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = fmul fast float [[RESULT_08]], [[L0]] -; CHECK-NEXT: [[FMUL]] = fmul fast float [[ADD]], [[L1]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ [[FMUL]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RESULT_0_LCSSA]] +; CHECK-NEXT: ret float [[TMP5]] ; ; CHECK-INTERLEAVED-LABEL: define float @reduction_fmul( ; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) { @@ -1347,25 +977,9 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fmul fast float [[TMP11]], [[TMP9]] +; 
CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = fmul fast float [[TMP11]], [[TMP9]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi float [ [[FMUL:%.*]], %[[FOR_BODY]] ], [ 0.000000e+00, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = fmul fast float [[RESULT_08]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[FMUL]] = fmul fast float [[ADD]], [[L1]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ [[FMUL]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret float [[RESULT_0_LCSSA]] ; entry: @@ -1410,21 +1024,8 @@ define i32 @reduction_sub_lhs(ptr noalias nocapture %A) { ; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 
[[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[X_05:%.*]] = phi i32 [ [[SUB:%.*]], %[[FOR_BODY]] ], [ 3, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[SUB]] = sub nsw i32 [[X_05]], [[L0]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[X_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP5]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sub_lhs( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]]) { @@ -1450,21 +1051,8 @@ define i32 @reduction_sub_lhs(ptr noalias nocapture %A) { ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP5]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[X_05:%.*]] = phi i32 [ [[SUB:%.*]], %[[FOR_BODY]] ], [ 3, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[SUB]] = sub nsw i32 [[X_05]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: 
[[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: ret i32 [[X_0_LCSSA]] +; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]] ; entry: br label %for.body @@ -1519,38 +1107,8 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP13:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[PREDPHI3]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_INC:.*]] ] -; CHECK-NEXT: [[SUM_033:%.*]] = phi float [ [[S]], %[[SCALAR_PH]] ], [ [[SUM_1:%.*]], %[[FOR_INC]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[CMP3:%.*]] = fcmp ogt float [[L0]], [[L1]] -; CHECK-NEXT: br i1 [[CMP3]], label %[[IF_THEN:.*]], label %[[FOR_INC]] -; CHECK: [[IF_THEN]]: -; CHECK-NEXT: [[CMP6:%.*]] = fcmp ogt float [[L1]], 1.000000e+00 -; CHECK-NEXT: br i1 [[CMP6]], label %[[IF_THEN8:.*]], label %[[IF_ELSE:.*]] -; CHECK: [[IF_THEN8]]: -; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[SUM_033]], [[L0]] -; CHECK-NEXT: br label %[[FOR_INC]] -; CHECK: [[IF_ELSE]]: -; CHECK-NEXT: [[CMP14:%.*]] = fcmp ogt float [[L0]], 2.000000e+00 -; CHECK-NEXT: br i1 [[CMP14]], label %[[IF_THEN16:.*]], label %[[FOR_INC]] -; CHECK: [[IF_THEN16]]: -; CHECK-NEXT: [[ADD19:%.*]] = fadd fast float 
[[SUM_033]], [[L1]] -; CHECK-NEXT: br label %[[FOR_INC]] -; CHECK: [[FOR_INC]]: -; CHECK-NEXT: [[SUM_1]] = phi float [ [[ADD]], %[[IF_THEN8]] ], [ [[ADD19]], %[[IF_THEN16]] ], [ [[SUM_033]], %[[IF_ELSE]] ], [ [[SUM_033]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[LFTR_WIDEIV]], 128 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[FOR_END]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi float [ [[SUM_1]], %[[FOR_INC]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[SUM_1_LCSSA]] +; CHECK-NEXT: ret float [[TMP13]] ; ; CHECK-INTERLEAVED-LABEL: define float @reduction_conditional( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], float [[S:%.*]]) { @@ -1602,38 +1160,8 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) { ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[PREDPHI9]], [[PREDPHI6]] ; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_INC:.*]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_033:%.*]] = phi float [ [[S]], %[[SCALAR_PH]] ], [ [[SUM_1:%.*]], %[[FOR_INC]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; 
CHECK-INTERLEAVED-NEXT: [[CMP3:%.*]] = fcmp ogt float [[L0]], [[L1]] -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP3]], label %[[IF_THEN:.*]], label %[[FOR_INC]] -; CHECK-INTERLEAVED: [[IF_THEN]]: -; CHECK-INTERLEAVED-NEXT: [[CMP6:%.*]] = fcmp ogt float [[L1]], 1.000000e+00 -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP6]], label %[[IF_THEN8:.*]], label %[[IF_ELSE:.*]] -; CHECK-INTERLEAVED: [[IF_THEN8]]: -; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = fadd fast float [[SUM_033]], [[L0]] -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_INC]] -; CHECK-INTERLEAVED: [[IF_ELSE]]: -; CHECK-INTERLEAVED-NEXT: [[CMP14:%.*]] = fcmp ogt float [[L0]], 2.000000e+00 -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP14]], label %[[IF_THEN16:.*]], label %[[FOR_INC]] -; CHECK-INTERLEAVED: [[IF_THEN16]]: -; CHECK-INTERLEAVED-NEXT: [[ADD19:%.*]] = fadd fast float [[SUM_033]], [[L1]] -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_INC]] -; CHECK-INTERLEAVED: [[FOR_INC]]: -; CHECK-INTERLEAVED-NEXT: [[SUM_1]] = phi float [ [[ADD]], %[[IF_THEN8]] ], [ [[ADD19]], %[[IF_THEN16]] ], [ [[SUM_033]], %[[IF_ELSE]] ], [ [[SUM_033]], %[[FOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[LFTR_WIDEIV]], 128 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[FOR_END]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[SUM_1_LCSSA:%.*]] = phi float [ [[SUM_1]], %[[FOR_INC]] ], [ [[TMP24]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: ret float [[SUM_1_LCSSA]] +; CHECK-INTERLEAVED-NEXT: ret float [[TMP24]] ; entry: br label %for.body @@ -1679,11 +1207,11 @@ for.end: define i32 @reduction_sum_multiuse(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-LABEL: define i32 @reduction_sum_multiuse( ; CHECK-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { -; CHECK-NEXT: [[ENTRY:.*]]: +; 
CHECK-NEXT: [[_LR_PH1:.*]]: ; CHECK-NEXT: br label %[[DOTLR_PH:.*]] ; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L10:%.*]], %[[DOTLR_PH]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[_LR_PH1]] ] +; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L10:%.*]], %[[DOTLR_PH]] ], [ 0, %[[_LR_PH1]] ] ; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] ; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 ; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] @@ -1703,11 +1231,11 @@ define i32 @reduction_sum_multiuse(ptr noalias nocapture %A, ptr noalias nocaptu ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sum_multiuse( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { -; CHECK-INTERLEAVED-NEXT: [[ENTRY:.*]]: +; CHECK-INTERLEAVED-NEXT: [[_LR_PH1:.*]]: ; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] ; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[ENTRY]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L10:%.*]], %[[DOTLR_PH]] ], [ 0, %[[ENTRY]] ] +; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[_LR_PH1]] ] +; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L10:%.*]], %[[DOTLR_PH]] ], [ 0, %[[_LR_PH1]] ] ; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] ; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 ; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] @@ -1778,26 +1306,8 @@ define i32 @reduction_predicated(ptr noalias nocapture %A, ptr noalias nocapture ; CHECK-NEXT: br i1 
[[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L6]] -; CHECK-NEXT: [[L8:%.*]] = add i32 [[L7]], [[L3]] -; CHECK-NEXT: [[L9]] = add i32 [[L8]], [[L5]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP7]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_predicated( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { @@ -1836,27 +1346,9 @@ define i32 @reduction_predicated(ptr noalias nocapture %A, ptr noalias nocapture ; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; 
CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP15]], [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP15]], [[TMP13]] ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L6]] -; CHECK-INTERLEAVED-NEXT: [[L8:%.*]] = add i32 [[L7]], [[L3]] -; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[L8]], [[L5]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]] ; entry: @@ -1902,27 +1394,13 @@ define i8 @reduction_add_trunc(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP5]] = zext <4 x i8> [[TMP4]] to <4 x i32> ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 -; 
CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP7:%.*]] = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> [[TMP4]]) ; CHECK-NEXT: [[TMP8:%.*]] = zext i8 [[TMP7]] to i32 ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02P:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 255, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = and i32 [[SUM_02P]], 255 -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i8, ptr [[L2]], align 4 -; CHECK-NEXT: [[L3E:%.*]] = zext i8 [[L3]] to i32 -; CHECK-NEXT: [[L9]] = add i32 [[SUM_02]], [[L3E]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA1:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[SUM_0_LCSSA1]] to i8 +; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[TMP8]] to i8 ; CHECK-NEXT: ret i8 [[SUM_0_LCSSA]] ; ; CHECK-INTERLEAVED-LABEL: define i8 @reduction_add_trunc( @@ -1951,28 +1429,14 @@ define i8 @reduction_add_trunc(ptr noalias nocapture %A) { ; CHECK-INTERLEAVED-NEXT: [[TMP11]] = zext <4 x i8> [[TMP9]] to <4 x i32> ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], 
!llvm.loop [[LOOP20:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i8> [[TMP9]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = zext i8 [[TMP13]] to i32 ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02P:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 255, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = and i32 [[SUM_02P]], 255 -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i8, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L3E:%.*]] = zext i8 [[L3]] to i32 -; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[SUM_02]], [[L3E]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA1:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP14]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[SUM_0_LCSSA1]] to i8 +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[TMP14]] to i8 ; CHECK-INTERLEAVED-NEXT: ret i8 [[SUM_0_LCSSA]] ; entry: @@ -2016,27 +1480,13 @@ define i8 @reduction_and_trunc(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP5]] = zext <4 x i8> [[TMP4]] to <4 x i32> ; CHECK-NEXT: 
[[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 -; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP7:%.*]] = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> [[TMP4]]) ; CHECK-NEXT: [[TMP8:%.*]] = zext i8 [[TMP7]] to i32 ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02P:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 255, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = and i32 [[SUM_02P]], 255 -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i8, ptr [[L2]], align 4 -; CHECK-NEXT: [[L3E:%.*]] = zext i8 [[L3]] to i32 -; CHECK-NEXT: [[L9]] = and i32 [[SUM_02]], [[L3E]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA1:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[SUM_0_LCSSA1]] to i8 +; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[TMP8]] to i8 ; CHECK-NEXT: ret i8 [[SUM_0_LCSSA]] ; ; CHECK-INTERLEAVED-LABEL: define i8 @reduction_and_trunc( @@ -2065,28 +1515,14 @@ define i8 @reduction_and_trunc(ptr noalias nocapture %A) { ; CHECK-INTERLEAVED-NEXT: [[TMP11]] = zext <4 x i8> [[TMP9]] to <4 x i32> ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = icmp eq i32 
[[INDEX_NEXT]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = and <4 x i8> [[TMP9]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = zext i8 [[TMP13]] to i32 ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02P:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 255, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = and i32 [[SUM_02P]], 255 -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i8, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L3E:%.*]] = zext i8 [[L3]] to i32 -; CHECK-INTERLEAVED-NEXT: [[L9]] = and i32 [[SUM_02]], [[L3E]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA1:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP14]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[SUM_0_LCSSA1]] to i8 +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[TMP14]] to i8 ; CHECK-INTERLEAVED-NEXT: ret i8 [[SUM_0_LCSSA]] ; entry: @@ -2133,7 +1569,7 @@ define float 
@reduction_fmuladd(ptr %a, ptr %b, i64 %n) { ; CHECK-NEXT: [[TMP4]] = fadd float [[VEC_PHI]], [[TMP3]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] @@ -2151,7 +1587,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) { ; CHECK-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP6]], float [[TMP7]], float [[SUM_07]]) ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret float [[MULADD_LCSSA]] @@ -2185,7 +1621,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) { ; CHECK-INTERLEAVED-NEXT: [[TMP9]] = fadd float [[VEC_PHI1]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fadd float [[TMP9]], [[TMP7]] ; 
CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2204,7 +1640,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) { ; CHECK-INTERLEAVED-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[SUM_07]]) ; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-INTERLEAVED: [[FOR_END]]: ; CHECK-INTERLEAVED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret float [[MULADD_LCSSA]] @@ -2373,7 +1809,7 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) { ; CHECK-NEXT: [[TMP7]] = fadd float [[VEC_PHI]], [[TMP6]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -2388,17 +1824,17 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) { ; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] ; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: br i1 [[C]], label %[[FOO:.*]], label %[[BAR:.*]] -; CHECK: [[FOO]]: +; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]] 
+; CHECK: [[IF]]: ; CHECK-NEXT: br label %[[LATCH]] -; CHECK: [[BAR]]: +; CHECK: [[ELSE]]: ; CHECK-NEXT: [[MULADD:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP9]], float [[TMP10]], float [[SUM]]) ; CHECK-NEXT: br label %[[LATCH]] ; CHECK: [[LATCH]]: -; CHECK-NEXT: [[SUM_NEXT]] = phi float [ [[SUM]], %[[FOO]] ], [ [[MULADD]], %[[BAR]] ] +; CHECK-NEXT: [[SUM_NEXT]] = phi float [ [[SUM]], %[[IF]] ], [ [[MULADD]], %[[ELSE]] ] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi float [ [[SUM_NEXT]], %[[LATCH]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret float [[SUM_NEXT_LCSSA]] @@ -2437,7 +1873,7 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) { ; CHECK-INTERLEAVED-NEXT: [[TMP13]] = fadd float [[VEC_PHI1]], [[TMP12]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fadd float [[TMP13]], [[TMP10]] ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2463,7 +1899,7 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) { ; CHECK-INTERLEAVED-NEXT: [[SUM_NEXT]] = phi float [ [[SUM]], %[[IF]] ], [ [[MULADD]], %[[ELSE]] ] ; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-INTERLEAVED-NEXT: 
[[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-INTERLEAVED: [[EXIT]]: ; CHECK-INTERLEAVED-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi float [ [[SUM_NEXT]], %[[LATCH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret float [[SUM_NEXT_LCSSA]] @@ -2524,7 +1960,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h ; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END7:.*]], label %[[SCALAR_PH]] @@ -2550,7 +1986,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h ; CHECK-NEXT: [[G_1]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[G_016]], %[[FOR_BODY2]] ] ; CHECK-NEXT: [[INC6]] = add nuw nsw i32 [[A_117]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC6]], [[I]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP27:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK: [[FOR_END7]]: ; CHECK-NEXT: [[G_1_LCSSA:%.*]] = phi i32 [ [[G_1]], %[[FOR_INC5]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[G_1_LCSSA]] @@ -2590,7 +2026,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h ; 
CHECK-INTERLEAVED-NEXT: [[TMP14]] = add i32 [[VEC_PHI1]], [[TMP13]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP14]], [[TMP11]] ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]] @@ -2617,7 +2053,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h ; CHECK-INTERLEAVED-NEXT: [[G_1]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[G_016]], %[[FOR_BODY2]] ] ; CHECK-INTERLEAVED-NEXT: [[INC6]] = add nuw nsw i32 [[A_117]], 1 ; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC6]], [[I]] -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP27:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-INTERLEAVED: [[FOR_END7]]: ; CHECK-INTERLEAVED-NEXT: [[G_1_LCSSA:%.*]] = phi i32 [ [[G_1]], %[[FOR_INC5]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[G_1_LCSSA]] @@ -2680,7 +2116,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read ; CHECK-NEXT: [[TMP11]] = add i32 [[TMP8]], [[TMP10]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: 
[[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END7:.*]], label %[[SCALAR_PH]] @@ -2707,7 +2143,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read ; CHECK-NEXT: [[G_1]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[G_016]], %[[FOR_BODY2]] ] ; CHECK-NEXT: [[INC6]] = add nuw nsw i32 [[A_117]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC6]], [[I]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP29:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK: [[FOR_END7]]: ; CHECK-NEXT: [[G_1_LCSSA:%.*]] = phi i32 [ [[G_1]], %[[FOR_INC5]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[G_1_LCSSA]] @@ -2753,7 +2189,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read ; CHECK-INTERLEAVED-NEXT: [[TMP20]] = add i32 [[TMP14]], [[TMP19]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP20]], [[TMP17]] ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]] @@ -2781,7 +2217,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read ; CHECK-INTERLEAVED-NEXT: [[G_1]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[G_016]], %[[FOR_BODY2]] ] ; CHECK-INTERLEAVED-NEXT: [[INC6]] = add nuw nsw i32 [[A_117]], 1 ; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC6]], [[I]] -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label 
%[[FOR_BODY2]], !llvm.loop [[LOOP29:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK-INTERLEAVED: [[FOR_END7]]: ; CHECK-INTERLEAVED-NEXT: [[G_1_LCSSA:%.*]] = phi i32 [ [[G_1]], %[[FOR_INC5]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[G_1_LCSSA]] @@ -2890,34 +2326,11 @@ define i32 @predicated_or_dominates_reduction(ptr %b) { ; CHECK-NEXT: [[TMP48]] = add i32 [[VEC_PHI]], [[TMP47]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP49:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_COND_CLEANUP]]: -; CHECK-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ [[A_1:%.*]], %[[FOR_INC:.*]] ], [ [[TMP48]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[A_1_LCSSA]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[G_09:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC3:%.*]], %[[FOR_INC]] ] -; CHECK-NEXT: [[A_08:%.*]] = phi i32 [ undef, %[[SCALAR_PH]] ], [ [[A_1]], %[[FOR_INC]] ] -; CHECK-NEXT: [[D:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i32 0, i32 [[G_09]], i32 1 -; CHECK-NEXT: [[TMP45:%.*]] = load i32, ptr [[D]], align 4 -; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP45]], 0 -; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label %[[LOR_LHS_FALSE:.*]], label %[[IF_THEN:.*]] -; CHECK: [[LOR_LHS_FALSE]]: -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i32 0, i32 [[G_09]] -; CHECK-NEXT: [[TMP46:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[TOBOOL2_NOT:%.*]] = icmp eq i32 [[TMP46]], 0 -; CHECK-NEXT: br i1 [[TOBOOL2_NOT]], 
label %[[FOR_INC]], label %[[IF_THEN]] -; CHECK: [[IF_THEN]]: -; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[A_08]], 1 -; CHECK-NEXT: br label %[[FOR_INC]] -; CHECK: [[FOR_INC]]: -; CHECK-NEXT: [[A_1]] = phi i32 [ [[INC]], %[[IF_THEN]] ], [ [[A_08]], %[[LOR_LHS_FALSE]] ] -; CHECK-NEXT: [[INC3]] = add nuw nsw i32 [[G_09]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC3]], 1000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] +; CHECK-NEXT: ret i32 [[TMP48]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @predicated_or_dominates_reduction( ; CHECK-INTERLEAVED-SAME: ptr [[B:%.*]]) { @@ -3051,35 +2464,12 @@ define i32 @predicated_or_dominates_reduction(ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP98]] = add i32 [[VEC_PHI1]], [[TMP97]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP99:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP99]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP99]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP98]], [[TMP94]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] ; CHECK-INTERLEAVED: [[FOR_COND_CLEANUP]]: -; CHECK-INTERLEAVED-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ [[A_1:%.*]], %[[FOR_INC:.*]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: ret i32 [[A_1_LCSSA]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[G_09:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC3:%.*]], %[[FOR_INC]] ] -; CHECK-INTERLEAVED-NEXT: [[A_08:%.*]] = phi i32 [ undef, %[[SCALAR_PH]] ], [ [[A_1]], %[[FOR_INC]] ] -; CHECK-INTERLEAVED-NEXT: [[D:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i32 0, i32 
[[G_09]], i32 1 -; CHECK-INTERLEAVED-NEXT: [[TMP100:%.*]] = load i32, ptr [[D]], align 4 -; CHECK-INTERLEAVED-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP100]], 0 -; CHECK-INTERLEAVED-NEXT: br i1 [[TOBOOL_NOT]], label %[[LOR_LHS_FALSE:.*]], label %[[IF_THEN:.*]] -; CHECK-INTERLEAVED: [[LOR_LHS_FALSE]]: -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i32 0, i32 [[G_09]] -; CHECK-INTERLEAVED-NEXT: [[TMP101:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[TOBOOL2_NOT:%.*]] = icmp eq i32 [[TMP101]], 0 -; CHECK-INTERLEAVED-NEXT: br i1 [[TOBOOL2_NOT]], label %[[FOR_INC]], label %[[IF_THEN]] -; CHECK-INTERLEAVED: [[IF_THEN]]: -; CHECK-INTERLEAVED-NEXT: [[INC:%.*]] = add nsw i32 [[A_08]], 1 -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_INC]] -; CHECK-INTERLEAVED: [[FOR_INC]]: -; CHECK-INTERLEAVED-NEXT: [[A_1]] = phi i32 [ [[INC]], %[[IF_THEN]] ], [ [[A_08]], %[[LOR_LHS_FALSE]] ] -; CHECK-INTERLEAVED-NEXT: [[INC3]] = add nuw nsw i32 [[G_09]], 1 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC3]], 1000 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] +; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]] ; entry: br label %for.body @@ -3135,27 +2525,11 @@ define i32 @reduction_add_sub(ptr noalias nocapture %A, ptr noalias nocapture %B ; CHECK-NEXT: [[TMP6]] = add i32 [[TMP4]], [[TMP5]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 -; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 
[[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[X_05:%.*]] = phi i32 [ [[SUB:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[L0_B:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[X_05]], [[L0]] -; CHECK-NEXT: [[SUB]] = sub nsw i32 [[ADD]], [[L0_B]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[FOR_BODY]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[X_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP6]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_add_sub( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { @@ -3187,28 +2561,12 @@ define i32 @reduction_add_sub(ptr noalias nocapture %A, ptr noalias nocapture %B ; CHECK-INTERLEAVED-NEXT: [[TMP13]] = add i32 [[TMP9]], [[TMP12]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP13]], [[TMP11]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; 
CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[X_05:%.*]] = phi i32 [ [[SUB:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L0_B:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = add nsw i32 [[X_05]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[SUB]] = sub nsw i32 [[ADD]], [[L0_B]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: ret i32 [[X_0_LCSSA]] +; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]] ; entry: br label %for.body @@ -3254,27 +2612,11 @@ define i32 @reduction_sub_add(ptr noalias nocapture %A, ptr noalias nocapture %B ; CHECK-NEXT: [[TMP6]] = add i32 [[TMP4]], [[TMP5]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 -; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: 
[[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[X_05:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[L0_B:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4 -; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X_05]], [[L0]] -; CHECK-NEXT: [[ADD]] = add nsw i32 [[SUB]], [[L0_B]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[X_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP6]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sub_add( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { @@ -3306,28 +2648,12 @@ define i32 @reduction_sub_add(ptr noalias nocapture %A, ptr noalias nocapture %B ; CHECK-INTERLEAVED-NEXT: [[TMP13]] = add i32 [[TMP9]], [[TMP12]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] 
= add i32 [[TMP13]], [[TMP11]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[X_05:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L0_B:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4 -; CHECK-INTERLEAVED-NEXT: [[SUB:%.*]] = sub nsw i32 [[X_05]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[ADD]] = add nsw i32 [[SUB]], [[L0_B]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: ret i32 [[X_0_LCSSA]] +; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]] ; entry: br label %for.body @@ -3351,6 +2677,129 @@ for.end: ; preds = %for.body, %entry ret i32 %x.0.lcssa } +; Test that bundling recipes that share an operand into an expression works. +; In this case the two extends are the recipes that share an operand. 
+define i64 @reduction_expression_same_operands(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) { +; CHECK-LABEL: define i64 @reduction_expression_same_operands( +; CHECK-SAME: ptr readonly captures(none) [[X:%.*]], ptr readonly captures(none) [[Y:%.*]], i32 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 4 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], 4 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP1]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i16> [[WIDE_LOAD]] to <4 x i64> +; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i64> [[TMP3]], [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP4]]) +; CHECK-NEXT: [[TMP6]] = add i64 [[VEC_PHI]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; 
CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi i64 [ [[RDX_NEXT:%.*]], %[[LOOP]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[IV]] +; CHECK-NEXT: [[LOAD0:%.*]] = load i16, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[CONV0:%.*]] = sext i16 [[LOAD0]] to i32 +; CHECK-NEXT: [[CONV1:%.*]] = sext i16 [[LOAD0]] to i32 +; CHECK-NEXT: [[MUL1:%.*]] = mul nsw i32 [[CONV0]], [[CONV1]] +; CHECK-NEXT: [[MUL:%.*]] = sext i32 [[MUL1]] to i64 +; CHECK-NEXT: [[RDX_NEXT]] = add nsw i64 [[RDX]], [[MUL]] +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP31:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i64 [ [[RDX_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i64 [[R_0_LCSSA]] +; +; CHECK-INTERLEAVED-LABEL: define i64 @reduction_expression_same_operands( +; CHECK-INTERLEAVED-SAME: ptr readonly captures(none) [[X:%.*]], ptr readonly captures(none) [[Y:%.*]], i32 [[N:%.*]]) { +; CHECK-INTERLEAVED-NEXT: [[ENTRY:.*]]: +; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 8 +; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-INTERLEAVED: [[VECTOR_PH]]: +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], 8 +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]] +; CHECK-INTERLEAVED-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK-INTERLEAVED: [[VECTOR_BODY]]: +; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi i64 [ 
0, %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[INDEX]] +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i32 4 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP1]], align 4 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i16>, ptr [[TMP2]], align 4 +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = sext <4 x i16> [[WIDE_LOAD]] to <4 x i64> +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nsw <4 x i64> [[TMP4]], [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP5]]) +; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add i64 [[VEC_PHI]], [[TMP6]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = sext <4 x i16> [[WIDE_LOAD2]] to <4 x i64> +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul nsw <4 x i64> [[TMP9]], [[TMP9]] +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP10]]) +; CHECK-INTERLEAVED-NEXT: [[TMP12]] = add i64 [[VEC_PHI1]], [[TMP11]] +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i64 [[TMP12]], [[TMP7]] +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK-INTERLEAVED: [[SCALAR_PH]]: +; CHECK-INTERLEAVED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-INTERLEAVED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-INTERLEAVED-NEXT: br label %[[LOOP:.*]] +; CHECK-INTERLEAVED: [[LOOP]]: +; CHECK-INTERLEAVED-NEXT: 
[[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-INTERLEAVED-NEXT: [[RDX:%.*]] = phi i64 [ [[RDX_NEXT:%.*]], %[[LOOP]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ] +; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[IV]] +; CHECK-INTERLEAVED-NEXT: [[LOAD0:%.*]] = load i16, ptr [[ARRAYIDX]], align 4 +; CHECK-INTERLEAVED-NEXT: [[CONV0:%.*]] = sext i16 [[LOAD0]] to i32 +; CHECK-INTERLEAVED-NEXT: [[CONV1:%.*]] = sext i16 [[LOAD0]] to i32 +; CHECK-INTERLEAVED-NEXT: [[MUL1:%.*]] = mul nsw i32 [[CONV0]], [[CONV1]] +; CHECK-INTERLEAVED-NEXT: [[MUL:%.*]] = sext i32 [[MUL1]] to i64 +; CHECK-INTERLEAVED-NEXT: [[RDX_NEXT]] = add nsw i64 [[RDX]], [[MUL]] +; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 +; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP31:![0-9]+]] +; CHECK-INTERLEAVED: [[EXIT]]: +; CHECK-INTERLEAVED-NEXT: [[R_0_LCSSA:%.*]] = phi i64 [ [[RDX_NEXT]], %[[LOOP]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] +; CHECK-INTERLEAVED-NEXT: ret i64 [[R_0_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] + %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] + %arrayidx = getelementptr inbounds i16, ptr %x, i32 %iv + %load0 = load i16, ptr %arrayidx, align 4 + %conv0 = sext i16 %load0 to i32 + %conv1 = sext i16 %load0 to i32 + %mul = mul nsw i32 %conv0, %conv1 + %conv = sext i32 %mul to i64 + %rdx.next = add nsw i64 %rdx, %conv + %iv.next = add nuw nsw i32 %iv, 1 + %exitcond = icmp eq i32 %iv.next, %n + br i1 %exitcond, label %exit, label %loop + +exit: + %r.0.lcssa = phi i64 [ %rdx.next, %loop ] + ret i64 %r.0.lcssa +} + declare float @llvm.fmuladd.f32(float, float, float) !6 = distinct !{!6, !7, !8} diff --git a/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll 
b/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll index 7d35ad0..855a0ce 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll @@ -60,11 +60,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP25]]) ; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] @@ -162,11 +158,7 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP43]]) ; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] @@ -267,11 +259,7 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; 
CHECK: ._crit_edge: ; CHECK-NEXT: [[PROD_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP42]]) ; CHECK-NEXT: ret i32 [[PROD_0_LCSSA]] @@ -371,11 +359,7 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP42]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] @@ -475,11 +459,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP42]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] @@ -579,11 +559,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP42]]) ; 
CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] @@ -683,11 +659,7 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP42]]) ; CHECK-NEXT: ret float [[RESULT_0_LCSSA]] @@ -787,11 +759,7 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP42]]) ; CHECK-NEXT: ret float [[RESULT_0_LCSSA]] @@ -874,11 +842,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP25]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] @@ 
-959,11 +923,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[TMP25]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/reduction.ll b/llvm/test/Transforms/LoopVectorize/reduction.ll index 916a83a..65d5701 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction.ll @@ -775,21 +775,7 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[IF_THEN:%.*]], label [[FOR_INC:%.*]] -; CHECK: if.then: -; CHECK-NEXT: br i1 poison, label [[IF_THEN8:%.*]], label [[IF_ELSE:%.*]] -; CHECK: if.then8: -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: if.else: -; CHECK-NEXT: br i1 poison, label [[IF_THEN16:%.*]], label [[FOR_INC]] -; CHECK: if.then16: -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_END]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: for.end: ; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[PREDPHI3]]) ; CHECK-NEXT: ret float [[SUM_1_LCSSA]] diff --git 
a/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll index e6ad593..e621b80 100644 --- a/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll +++ b/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll @@ -24,20 +24,8 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[BODY:.*]] -; CHECK: [[BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_TMP:%.*]] = phi i32 [ [[SUM:%.*]], %[[BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[LOAD0:%.*]] = load i32, ptr [[GEP0]], align 4 -; CHECK-NEXT: [[SUM]] = add i32 [[SUM_TMP]], [[LOAD0]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[BODY]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[SUM]], %[[BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP4]] ; entry: br label %body diff --git a/llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll b/llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll index 826696f..0896848 100644 --- a/llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll @@ -25,22 +25,8 @@ define i32 @preserve_inbounds(i64 %start, ptr %ptr) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x 
i32> [[TMP4]]) ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[REV_IND:%.*]] = phi i64 [ [[START]], %[[SCALAR_PH]] ], [ [[REV_IND_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[REDUX:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[REDUX_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[REV_IND_NEXT]] = add i64 [[REV_IND]], -1 -; CHECK-NEXT: [[GEP_PTR_IND:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[REV_IND_NEXT]] -; CHECK-NEXT: [[LD_PTR:%.*]] = load i32, ptr [[GEP_PTR_IND]], align 4 -; CHECK-NEXT: [[REDUX_NEXT]] = add i32 [[LD_PTR]], [[REDUX]] -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp ne i32 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[LOOP]], label %[[END]] ; CHECK: [[END]]: -; CHECK-NEXT: [[REDUX_NEXT_LCSSA:%.*]] = phi i32 [ [[REDUX_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[REDUX_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP6]] ; entry: br label %loop @@ -85,22 +71,8 @@ define i32 @preserve_nusw(i64 %start, ptr %ptr) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[REV_IND:%.*]] = phi i64 [ [[START]], %[[SCALAR_PH]] ], [ [[REV_IND_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[REDUX:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[REDUX_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[REV_IND_NEXT]] = add i64 [[REV_IND]], -1 -; CHECK-NEXT: [[GEP_PTR_IND:%.*]] = getelementptr nusw i32, ptr [[PTR]], i64 [[REV_IND_NEXT]] -; CHECK-NEXT: [[LD_PTR:%.*]] = load i32, ptr [[GEP_PTR_IND]], align 4 -; CHECK-NEXT: [[REDUX_NEXT]] = add i32 
[[LD_PTR]], [[REDUX]] -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp ne i32 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[LOOP]], label %[[END]] ; CHECK: [[END]]: -; CHECK-NEXT: [[REDUX_NEXT_LCSSA:%.*]] = phi i32 [ [[REDUX_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[REDUX_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP6]] ; entry: br label %loop @@ -145,22 +117,8 @@ define i32 @drop_nuw(i64 %start, ptr %ptr) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[REV_IND:%.*]] = phi i64 [ [[START]], %[[SCALAR_PH]] ], [ [[REV_IND_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[REDUX:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[REDUX_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[REV_IND_NEXT]] = add i64 [[REV_IND]], -1 -; CHECK-NEXT: [[GEP_PTR_IND:%.*]] = getelementptr nuw i32, ptr [[PTR]], i64 [[REV_IND_NEXT]] -; CHECK-NEXT: [[LD_PTR:%.*]] = load i32, ptr [[GEP_PTR_IND]], align 4 -; CHECK-NEXT: [[REDUX_NEXT]] = add i32 [[LD_PTR]], [[REDUX]] -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp ne i32 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[LOOP]], label %[[END]] ; CHECK: [[END]]: -; CHECK-NEXT: [[REDUX_NEXT_LCSSA:%.*]] = phi i32 [ [[REDUX_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[REDUX_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP6]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/reverse_induction.ll b/llvm/test/Transforms/LoopVectorize/reverse_induction.ll index 5790921..31129d3 100644 --- a/llvm/test/Transforms/LoopVectorize/reverse_induction.ll +++ 
b/llvm/test/Transforms/LoopVectorize/reverse_induction.ll @@ -37,22 +37,8 @@ define i32 @reverse_induction_i64(i64 %startval, ptr %ptr) { ; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP11]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-NEXT: br label %[[LOOPEND:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[ADD_I7:%.*]] = phi i64 [ [[STARTVAL]], %[[SCALAR_PH]] ], [ [[ADD_I:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[I_06:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC4:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[REDUX5:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC_REDUX:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ADD_I]] = add i64 [[ADD_I7]], -1 -; CHECK-NEXT: [[KIND__I:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[ADD_I]] -; CHECK-NEXT: [[TMP_I1:%.*]] = load i32, ptr [[KIND__I]], align 4 -; CHECK-NEXT: [[INC_REDUX]] = add i32 [[TMP_I1]], [[REDUX5]] -; CHECK-NEXT: [[INC4]] = add i32 [[I_06]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC4]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[LOOPEND]] ; CHECK: [[LOOPEND]]: -; CHECK-NEXT: [[INC_REDUX_LCSSA:%.*]] = phi i32 [ [[INC_REDUX]], %[[FOR_BODY]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[INC_REDUX_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP13]] ; entry: br label %for.body @@ -105,22 +91,8 @@ define i32 @reverse_induction_i128(i128 %startval, ptr %ptr) { ; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP11]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-NEXT: br label %[[LOOPEND:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[ADD_I7:%.*]] = phi i128 [ [[STARTVAL]], %[[SCALAR_PH]] ], [ [[ADD_I:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[I_06:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC4:%.*]], %[[FOR_BODY]] ] -; 
CHECK-NEXT: [[REDUX5:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC_REDUX:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ADD_I]] = add i128 [[ADD_I7]], -1 -; CHECK-NEXT: [[KIND__I:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i128 [[ADD_I]] -; CHECK-NEXT: [[TMP_I1:%.*]] = load i32, ptr [[KIND__I]], align 4 -; CHECK-NEXT: [[INC_REDUX]] = add i32 [[TMP_I1]], [[REDUX5]] -; CHECK-NEXT: [[INC4]] = add i32 [[I_06]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC4]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[LOOPEND]] ; CHECK: [[LOOPEND]]: -; CHECK-NEXT: [[INC_REDUX_LCSSA:%.*]] = phi i32 [ [[INC_REDUX]], %[[FOR_BODY]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[INC_REDUX_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP13]] ; entry: br label %for.body @@ -263,19 +235,6 @@ define void @reverse_forward_induction_i64_i8() { ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[WHILE_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[WHILE_BODY:.*]] -; CHECK: [[WHILE_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 1023, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ] -; CHECK-NEXT: [[FORWARD_INDUCTION_05:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[WHILE_BODY]] ] -; CHECK-NEXT: [[INC]] = add i8 [[FORWARD_INDUCTION_05]], 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[INC]] to i32 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr @a, i64 0, i64 [[INDVARS_IV]] -; CHECK-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], -1 -; CHECK-NEXT: [[TMP13:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP13]], 0 -; CHECK-NEXT: br i1 [[CMP]], label %[[WHILE_BODY]], label %[[WHILE_END]] ; CHECK: [[WHILE_END]]: ; CHECK-NEXT: ret void ; @@ -329,19 +288,6 @@ define void 
@reverse_forward_induction_i64_i8_signed() { ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[WHILE_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[WHILE_BODY:.*]] -; CHECK: [[WHILE_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 1023, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ] -; CHECK-NEXT: [[FORWARD_INDUCTION_05:%.*]] = phi i8 [ -127, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[WHILE_BODY]] ] -; CHECK-NEXT: [[INC]] = add i8 [[FORWARD_INDUCTION_05]], 1 -; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[INC]] to i32 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr @a, i64 0, i64 [[INDVARS_IV]] -; CHECK-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], -1 -; CHECK-NEXT: [[TMP13:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP13]], 0 -; CHECK-NEXT: br i1 [[CMP]], label %[[WHILE_BODY]], label %[[WHILE_END]] ; CHECK: [[WHILE_END]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check.ll b/llvm/test/Transforms/LoopVectorize/runtime-check.ll index 79fdc07..f87be5a 100644 --- a/llvm/test/Transforms/LoopVectorize/runtime-check.ll +++ b/llvm/test/Transforms/LoopVectorize/runtime-check.ll @@ -429,13 +429,9 @@ define dso_local void @forced_optsize(ptr noalias nocapture readonly %x_p, ptr n ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] ; ; 
FORCED_OPTSIZE-LABEL: @forced_optsize( ; FORCED_OPTSIZE-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll b/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll index a43ea07d..c7b2704 100644 --- a/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll +++ b/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll @@ -19,60 +19,49 @@ define void @test_pr63368(i1 %c, ptr %A) { ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: +; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP0]], [[VECTOR_BODY]] ] ; CHECK-NEXT: br label [[EXIT_1:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_1_HEADER:%.*]] -; CHECK: loop.1.header: -; CHECK-NEXT: [[IV_1:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_1_NEXT:%.*]], [[LOOP_1_LATCH:%.*]] ] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[A]], align 4 -; CHECK-NEXT: br i1 [[C]], label [[LOOP_1_LATCH]], label [[LOOP_1_LATCH]] -; CHECK: loop.1.latch: -; CHECK-NEXT: [[L_LCSSA:%.*]] = phi i32 [ [[L]], [[LOOP_1_HEADER]] ], [ [[L]], [[LOOP_1_HEADER]] ] -; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i32 [[IV_1]], 1 -; CHECK-NEXT: [[EC_1:%.*]] = icmp eq i32 [[IV_1_NEXT]], 100 -; CHECK-NEXT: br i1 [[EC_1]], label [[EXIT_1]], label [[LOOP_1_HEADER]] ; CHECK: exit.1: -; CHECK-NEXT: [[L_LCSSA_LCSSA:%.*]] = phi i32 [ [[L_LCSSA]], [[LOOP_1_LATCH]] ], [ [[TMP0]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[SMAX1:%.*]] = call i32 @llvm.smax.i32(i32 [[L_LCSSA_LCSSA]], i32 -1) +; CHECK-NEXT: [[SMAX1:%.*]] = call i32 @llvm.smax.i32(i32 [[DOTLCSSA]], i32 -1) ; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[SMAX1]], 2 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP2]], 4 -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH2:%.*]], label [[VECTOR_SCEVCHECK:%.*]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label 
[[VECTOR_SCEVCHECK:%.*]] ; CHECK: vector.scevcheck: -; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[L_LCSSA_LCSSA]], i32 -1) +; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 poison, i32 -1) ; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[SMAX]], 1 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8 ; CHECK-NEXT: [[TMP5:%.*]] = add i8 1, [[TMP4]] ; CHECK-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], 1 ; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP3]], 255 ; CHECK-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]] -; CHECK-NEXT: br i1 [[TMP8]], label [[SCALAR_PH2]], label [[VECTOR_PH3:%.*]] -; CHECK: vector.ph3: +; CHECK-NEXT: br i1 [[TMP8]], label [[SCALAR_PH]], label [[VECTOR_PH2:%.*]] +; CHECK: vector.ph2: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], 4 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP2]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP9:%.*]] = trunc i32 [[N_VEC]] to i8 -; CHECK-NEXT: br label [[VECTOR_BODY4:%.*]] -; CHECK: vector.body4: -; CHECK-NEXT: [[INDEX5:%.*]] = phi i32 [ 0, [[VECTOR_PH3]] ], [ [[INDEX_NEXT6:%.*]], [[VECTOR_BODY4]] ] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX5]] to i8 +; CHECK-NEXT: br label [[VECTOR_BODY3:%.*]] +; CHECK: vector.body3: +; CHECK-NEXT: [[INDEX4:%.*]] = phi i32 [ 0, [[VECTOR_PH2]] ], [ [[INDEX_NEXT5:%.*]], [[VECTOR_BODY3]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX4]] to i8 ; CHECK-NEXT: [[TMP10:%.*]] = add i8 [[OFFSET_IDX]], 1 ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[A]], i8 [[TMP10]] ; CHECK-NEXT: store <4 x i8> zeroinitializer, ptr [[TMP11]], align 1 -; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i32 [[INDEX5]], 4 -; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT6]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK7:%.*]], label [[VECTOR_BODY4]], !llvm.loop [[LOOP3:![0-9]+]] -; CHECK: middle.block7: +; CHECK-NEXT: [[INDEX_NEXT5]] = add nuw i32 [[INDEX4]], 4 +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT5]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP12]], label 
[[MIDDLE_BLOCK6:%.*]], label [[VECTOR_BODY3]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: middle.block6: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP2]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT_2:%.*]], label [[SCALAR_PH2]] -; CHECK: scalar.ph2: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ [[TMP9]], [[MIDDLE_BLOCK7]] ], [ 0, [[EXIT_1]] ], [ 0, [[VECTOR_SCEVCHECK]] ] +; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT_2:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ [[TMP9]], [[MIDDLE_BLOCK6]] ], [ 0, [[EXIT_1]] ], [ 0, [[VECTOR_SCEVCHECK]] ] ; CHECK-NEXT: br label [[LOOP_2:%.*]] ; CHECK: loop.2: -; CHECK-NEXT: [[IV_2:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[SCALAR_PH2]] ], [ [[IV_2_NEXT:%.*]], [[LOOP_2]] ] +; CHECK-NEXT: [[IV_2:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP_2]] ] ; CHECK-NEXT: [[IV_2_NEXT]] = add i8 [[IV_2]], 1 ; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i8 [[IV_2_NEXT]] ; CHECK-NEXT: store i8 0, ptr [[GEP_A]], align 1 ; CHECK-NEXT: [[IV_2_SEXT:%.*]] = sext i8 [[IV_2]] to i32 -; CHECK-NEXT: [[EC_2:%.*]] = icmp sge i32 [[L_LCSSA_LCSSA]], [[IV_2_SEXT]] +; CHECK-NEXT: [[EC_2:%.*]] = icmp sge i32 [[DOTLCSSA]], [[IV_2_SEXT]] ; CHECK-NEXT: br i1 [[EC_2]], label [[LOOP_2]], label [[EXIT_2]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: exit.2: ; CHECK-NEXT: ret void diff --git a/llvm/test/Transforms/LoopVectorize/select-neg-cond.ll b/llvm/test/Transforms/LoopVectorize/select-neg-cond.ll index d620b92..92af828 100644 --- a/llvm/test/Transforms/LoopVectorize/select-neg-cond.ll +++ b/llvm/test/Transforms/LoopVectorize/select-neg-cond.ll @@ -20,21 +20,6 @@ define void @neg_cond(ptr noalias %p, ptr noalias %q) { ; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: 
[[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_GEP:%.*]] = getelementptr i32, ptr [[P]], i32 [[IV]] -; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[P_GEP]], align 4 -; CHECK-NEXT: [[Q_GEP:%.*]] = getelementptr i32, ptr [[Q]], i32 [[IV]] -; CHECK-NEXT: [[Y:%.*]] = load i32, ptr [[Q_GEP]], align 4 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], 42 -; CHECK-NEXT: [[NOT:%.*]] = xor i1 [[CMP]], true -; CHECK-NEXT: [[SEL:%.*]] = select i1 [[NOT]], i32 42, i32 43 -; CHECK-NEXT: store i32 [[SEL]], ptr [[P_GEP]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i32 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/select-reduction-start-value-may-be-undef-or-poison.ll b/llvm/test/Transforms/LoopVectorize/select-reduction-start-value-may-be-undef-or-poison.ll index b87cf90..f4d5a84 100644 --- a/llvm/test/Transforms/LoopVectorize/select-reduction-start-value-may-be-undef-or-poison.ll +++ b/llvm/test/Transforms/LoopVectorize/select-reduction-start-value-may-be-undef-or-poison.ll @@ -25,21 +25,8 @@ define i64 @pr62565_incoming_value_known_undef(i64 %a, ptr %src) { ; CHECK-NEXT: [[TMP5:%.*]] = freeze i1 [[TMP4]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP5]], i64 [[A]], i64 undef ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ undef, [[SCALAR_PH]] ], [ [[SELECT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 1 -; CHECK-NEXT: [[SELECT]] = select i1 [[C]], i64 [[RED]], i64 [[A]] -; CHECK-NEXT: [[ADD]] = 
add nuw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 32 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[SELECT]], [[LOOP]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[PHI]] +; CHECK-NEXT: ret i64 [[RDX_SELECT]] ; entry: br label %loop @@ -83,21 +70,8 @@ define i64 @pr62565_incoming_value_known_poison(i64 %a, ptr %src) { ; CHECK-NEXT: [[TMP5:%.*]] = freeze i1 [[TMP4]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP5]], i64 [[A]], i64 poison ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ poison, [[SCALAR_PH]] ], [ [[SELECT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 1 -; CHECK-NEXT: [[SELECT]] = select i1 [[C]], i64 [[RED]], i64 [[A]] -; CHECK-NEXT: [[ADD]] = add nuw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 32 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[SELECT]], [[LOOP]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[PHI]] +; CHECK-NEXT: ret i64 [[RDX_SELECT]] ; entry: br label %loop @@ -141,21 +115,8 @@ define i64 @pr62565_incoming_value_may_be_poison(i64 %a, ptr %src, i64 %start) { ; CHECK-NEXT: [[TMP5:%.*]] = freeze i1 [[TMP4]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP5]], i64 [[A]], i64 [[START]] ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ [[START]], [[SCALAR_PH]] ], [ [[SELECT:%.*]], [[LOOP]] ] -; 
CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 1 -; CHECK-NEXT: [[SELECT]] = select i1 [[C]], i64 [[RED]], i64 [[A]] -; CHECK-NEXT: [[ADD]] = add nuw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 32 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[SELECT]], [[LOOP]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[PHI]] +; CHECK-NEXT: ret i64 [[RDX_SELECT]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/select-reduction.ll b/llvm/test/Transforms/LoopVectorize/select-reduction.ll index 0fd780e..1f5646d 100644 --- a/llvm/test/Transforms/LoopVectorize/select-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/select-reduction.ll @@ -36,22 +36,11 @@ define i32 @test(i64 %N, i32 %x) { ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP3]]) -; CHECK-NEXT: br label [[EXIT_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[NEXT:%.*]] = phi i32 [ [[SEL:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[EXTRA_ITER]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[SEL_COND:%.*]] = icmp sgt i32 [[NEXT]], 10 -; CHECK-NEXT: [[SEL]] = select i1 [[SEL_COND]], i32 [[NEXT]], i32 10 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 0 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT_LOOPEXIT]], label [[LOOP]] ; CHECK: exit.loopexit: -; CHECK-NEXT: [[SEL_LCSSA:%.*]] = phi i32 [ [[SEL]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: -; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 
0, [[CHECK]] ], [ [[SEL_LCSSA]], [[EXIT_LOOPEXIT]] ] +; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 0, [[CHECK]] ], [ [[TMP5]], [[LOOP]] ] ; CHECK-NEXT: ret i32 [[RESULT]] ; entry: @@ -90,19 +79,9 @@ define i32 @pr66895_tail_fold_reduction_exit_inst_gets_simplified(i32 %n) { ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[VEC_PHI]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 12, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], -1 -; CHECK-NEXT: [[RED_NEXT]] = mul i32 [[RED]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 0 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[RED_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RED_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP3]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-cond-poison.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-cond-poison.ll index edad0b5..794e274 100644 --- a/llvm/test/Transforms/LoopVectorize/single-early-exit-cond-poison.ll +++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-cond-poison.ll @@ -40,20 +40,8 @@ define noundef i32 @f(i32 noundef %g) { ; VF4IC2-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32 ; VF4IC2-NEXT: [[TMP16:%.*]] = add i32 0, [[TMP15]] ; VF4IC2-NEXT: br label %[[RETURN]] -; VF4IC2: [[SCALAR_PH:.*]]: -; VF4IC2-NEXT: br label %[[LOOP_HEADER:.*]] -; VF4IC2: [[LOOP_HEADER]]: -; VF4IC2-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF4IC2-NEXT: [[MUL:%.*]] = shl nuw nsw i32 [[IV]], 3 -; 
VF4IC2-NEXT: [[SHR:%.*]] = ashr i32 [[G]], [[MUL]] -; VF4IC2-NEXT: [[EARLY_COND:%.*]] = icmp eq i32 [[SHR]], 0 -; VF4IC2-NEXT: br i1 [[EARLY_COND]], label %[[LOOP_LATCH]], label %[[RETURN]] -; VF4IC2: [[LOOP_LATCH]]: -; VF4IC2-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; VF4IC2-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 8 -; VF4IC2-NEXT: br i1 [[EC]], label %[[RETURN]], label %[[LOOP_HEADER]] ; VF4IC2: [[RETURN]]: -; VF4IC2-NEXT: [[RES:%.*]] = phi i32 [ [[SHR]], %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ], [ [[TMP16]], %[[VECTOR_EARLY_EXIT]] ] +; VF4IC2-NEXT: [[RES:%.*]] = phi i32 [ [[TMP8]], %[[MIDDLE_BLOCK]] ], [ [[TMP16]], %[[VECTOR_EARLY_EXIT]] ] ; VF4IC2-NEXT: ret i32 [[RES]] ; ; VF8IC1-LABEL: define noundef i32 @f( @@ -80,20 +68,8 @@ define noundef i32 @f(i32 noundef %g) { ; VF8IC1-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32 ; VF8IC1-NEXT: [[TMP7:%.*]] = add i32 0, [[TMP6]] ; VF8IC1-NEXT: br label %[[RETURN]] -; VF8IC1: [[SCALAR_PH:.*]]: -; VF8IC1-NEXT: br label %[[LOOP_HEADER:.*]] -; VF8IC1: [[LOOP_HEADER]]: -; VF8IC1-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF8IC1-NEXT: [[MUL:%.*]] = shl nuw nsw i32 [[IV]], 3 -; VF8IC1-NEXT: [[SHR:%.*]] = ashr i32 [[G]], [[MUL]] -; VF8IC1-NEXT: [[EARLY_COND:%.*]] = icmp eq i32 [[SHR]], 0 -; VF8IC1-NEXT: br i1 [[EARLY_COND]], label %[[LOOP_LATCH]], label %[[RETURN]] -; VF8IC1: [[LOOP_LATCH]]: -; VF8IC1-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; VF8IC1-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 8 -; VF8IC1-NEXT: br i1 [[EC]], label %[[RETURN]], label %[[LOOP_HEADER]] ; VF8IC1: [[RETURN]]: -; VF8IC1-NEXT: [[RES:%.*]] = phi i32 [ [[SHR]], %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ], [ [[TMP7]], %[[VECTOR_EARLY_EXIT]] ] +; VF8IC1-NEXT: [[RES:%.*]] = phi i32 [ [[TMP4]], %[[MIDDLE_BLOCK]] ], [ [[TMP7]], %[[VECTOR_EARLY_EXIT]] ] ; VF8IC1-NEXT: ret i32 [[RES]] ; entry: diff --git 
a/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll index b1b3a3f..644900d 100644 --- a/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll +++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll @@ -9,9 +9,9 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_si ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i64 4), "dereferenceable"(ptr [[P2]], i64 1024) ] ; CHECK-NEXT: br label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX1]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP0]], align 1 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX1]] @@ -22,7 +22,7 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_si ; CHECK-NEXT: [[TMP5:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP3]]) ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 1024 ; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]] -; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_SPLIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_SPLIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_SPLIT]]: ; CHECK-NEXT: br i1 [[TMP5]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -31,22 +31,8 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_si ; CHECK-NEXT: [[TMP8:%.*]] = call i64 
@llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP4]], i1 true) ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX1]], [[TMP8]] ; CHECK-NEXT: br label %[[LOOP_END]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP1:.*]] -; CHECK: [[LOOP1]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], %[[LOOP_INC:.*]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label %[[LOOP_INC]], label %[[LOOP_END]] -; CHECK: [[LOOP_INC]]: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP1]], label %[[LOOP_END]] ; CHECK: [[LOOP_END]]: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], %[[LOOP1]] ], [ -1, %[[LOOP_INC]] ], [ -1, %[[MIDDLE_BLOCK]] ], [ [[TMP9]], %[[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ -1, %[[MIDDLE_BLOCK]] ], [ [[TMP9]], %[[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -331,9 +317,9 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_n_not_zero_i16_p ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[N_VEC]], 2 ; CHECK-NEXT: [[IV_NEXT1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]] -; CHECK-NEXT: br label %[[LOOP_HEADER1:.*]] -; CHECK: [[LOOP_HEADER1]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[LOOP_HEADER1]] ] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = 
mul i64 [[INDEX]], 2 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[NEXT_GEP]], align 2 @@ -343,10 +329,10 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_n_not_zero_i16_p ; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]]) ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]] -; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_SPLIT:.*]], label %[[LOOP_HEADER1]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_SPLIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_SPLIT]]: -; CHECK-NEXT: br i1 [[TMP7]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[LOOP_LATCH1:.*]] -; CHECK: [[LOOP_LATCH1]]: +; CHECK-NEXT: br i1 [[TMP7]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[MIDDLE_BLOCK:.*]] +; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_PH]] ; CHECK: [[VECTOR_EARLY_EXIT]]: @@ -356,10 +342,10 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_n_not_zero_i16_p ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP12]] ; CHECK-NEXT: br label %[[EXIT_LOOPEXIT]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[IV1:%.*]] = phi ptr [ [[IV_NEXT1]], %[[LOOP_LATCH1]] ], [ [[A]], %[[LOOP_HEADER_PREHEADER]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IV_NEXT1]], %[[MIDDLE_BLOCK]] ], [ [[A]], %[[LOOP_HEADER_PREHEADER]] ] ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] ; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[IV1]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[IV]], align 2 ; CHECK-NEXT: [[C_0:%.*]] = 
icmp eq i16 [[L]], 0 ; CHECK-NEXT: br i1 [[C_0]], label %[[EXIT_LOOPEXIT]], label %[[LOOP_LATCH]] @@ -368,7 +354,7 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_n_not_zero_i16_p ; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[IV_NEXT]], [[A_END]] ; CHECK-NEXT: br i1 [[EC]], label %[[EXIT_LOOPEXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[EXIT_LOOPEXIT]]: -; CHECK-NEXT: [[P_PH:%.*]] = phi ptr [ [[A_END]], %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ [[A_END]], %[[LOOP_LATCH1]] ], [ [[TMP13]], %[[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[P_PH:%.*]] = phi ptr [ [[A_END]], %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ [[A_END]], %[[MIDDLE_BLOCK]] ], [ [[TMP13]], %[[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: br label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[P:%.*]] = phi ptr [ [[A]], %[[ENTRY]] ], [ [[P_PH]], %[[EXIT_LOOPEXIT]] ] diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-hint.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-hint.ll index b630557..d8e62c7 100644 --- a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-hint.ll +++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-hint.ll @@ -43,24 +43,10 @@ define i64 @multi_exiting_to_different_exits_live_in_exit_values() { ; VF4IC4-NEXT: br label %[[E2:.*]] ; VF4IC4: [[VECTOR_EARLY_EXIT]]: ; VF4IC4-NEXT: br label %[[E1:.*]] -; VF4IC4: [[SCALAR_PH:.*]]: -; VF4IC4-NEXT: br label %[[LOOP_HEADER:.*]] -; VF4IC4: [[LOOP_HEADER]]: -; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[INC:%.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[SCALAR_PH]] ] -; VF4IC4-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]] -; VF4IC4-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4 -; VF4IC4-NEXT: [[C_1:%.*]] = icmp eq i32 [[L]], 10 -; VF4IC4-NEXT: br i1 [[C_1]], label %[[E1]], label %[[LOOP_LATCH]] -; VF4IC4: [[LOOP_LATCH]]: -; VF4IC4-NEXT: [[INC]] = add nuw i64 [[IV]], 1 -; VF4IC4-NEXT: [[C_2:%.*]] = icmp eq 
i64 [[INC]], 128 -; VF4IC4-NEXT: br i1 [[C_2]], label %[[E2]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]] ; VF4IC4: [[E1]]: -; VF4IC4-NEXT: [[P1:%.*]] = phi i64 [ 0, %[[LOOP_HEADER]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] -; VF4IC4-NEXT: ret i64 [[P1]] +; VF4IC4-NEXT: ret i64 0 ; VF4IC4: [[E2]]: -; VF4IC4-NEXT: [[P2:%.*]] = phi i64 [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ] -; VF4IC4-NEXT: ret i64 [[P2]] +; VF4IC4-NEXT: ret i64 1 ; entry: %src = alloca [128 x i32] @@ -94,6 +80,4 @@ e2: ; VF4IC4: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; VF4IC4: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; VF4IC4: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; VF4IC4: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} -; VF4IC4: [[META4]] = !{!"llvm.loop.interleave.count", i32 4} ;. diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-only.ll index 6836f7b..a50ce96 100644 --- a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-only.ll +++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-only.ll @@ -46,21 +46,9 @@ define i8 @iv_used_in_exit_with_math(i8 noundef %g) { ; CHECK-NEXT: [[TMP20:%.*]] = trunc i32 [[TMP19]] to i8 ; CHECK-NEXT: [[TMP23:%.*]] = trunc i32 [[TMP19]] to i8 ; CHECK-NEXT: br label %[[RETURN]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[S:%.*]] = shl nuw i8 1, [[IV]] -; CHECK-NEXT: [[A:%.*]] = and i8 [[S]], [[G]] -; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[RETURN]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i8 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i8 [[IV_NEXT]], 4 -; CHECK-NEXT: br i1 [[EC]], label %[[RETURN]], label 
%[[LOOP_HEADER]] ; CHECK: [[RETURN]]: -; CHECK-NEXT: [[RES_IV1:%.*]] = phi i8 [ 32, %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ 32, %[[MIDDLE_BLOCK]] ], [ [[TMP20]], %[[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: [[RES_IV2:%.*]] = phi i8 [ 0, %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ 0, %[[MIDDLE_BLOCK]] ], [ [[TMP23]], %[[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RES_IV1:%.*]] = phi i8 [ 32, %[[MIDDLE_BLOCK]] ], [ [[TMP20]], %[[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RES_IV2:%.*]] = phi i8 [ 0, %[[MIDDLE_BLOCK]] ], [ [[TMP23]], %[[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: [[RES:%.*]] = add i8 [[RES_IV1]], [[RES_IV2]] ; CHECK-NEXT: ret i8 [[RES]] ; @@ -125,21 +113,9 @@ define i32 @iv_used_in_exit_with_loads(ptr align 4 dereferenceable(128) %src) { ; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32 ; CHECK-NEXT: [[TMP29:%.*]] = add i32 [[INDEX]], [[TMP28]] ; CHECK-NEXT: br label %[[RETURN]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[RETURN]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 32 -; CHECK-NEXT: br i1 [[EC]], label %[[RETURN]], label %[[LOOP_HEADER]] ; CHECK: [[RETURN]]: -; CHECK-NEXT: [[RES_IV1:%.*]] = phi i32 [ 32, %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ 32, %[[MIDDLE_BLOCK]] ], [ [[TMP29]], %[[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: [[RES_IV2:%.*]] = phi i32 [ 0, %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ 0, %[[MIDDLE_BLOCK]] ], [ [[TMP29]], %[[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RES_IV1:%.*]] = phi i32 [ 32, %[[MIDDLE_BLOCK]] ], [ [[TMP29]], 
%[[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RES_IV2:%.*]] = phi i32 [ 0, %[[MIDDLE_BLOCK]] ], [ [[TMP29]], %[[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: [[RES:%.*]] = add i32 [[RES_IV1]], [[RES_IV2]] ; CHECK-NEXT: ret i32 [[RES]] ; diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave.ll index a4ce68f..ed5dcc7 100644 --- a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave.ll @@ -42,25 +42,11 @@ define i64 @multi_exiting_to_different_exits_live_in_exit_values() { ; VF4IC4: middle.block: ; VF4IC4-NEXT: br label [[E2:%.*]] ; VF4IC4: vector.early.exit: -; VF4IC4-NEXT: br label [[E1:%.*]] -; VF4IC4: scalar.ph: ; VF4IC4-NEXT: br label [[LOOP_HEADER:%.*]] -; VF4IC4: loop.header: -; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[INC:%.*]], [[LOOP_LATCH:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ] -; VF4IC4-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]] -; VF4IC4-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4 -; VF4IC4-NEXT: [[C_1:%.*]] = icmp eq i32 [[L]], 10 -; VF4IC4-NEXT: br i1 [[C_1]], label [[E1]], label [[LOOP_LATCH]] -; VF4IC4: loop.latch: -; VF4IC4-NEXT: [[INC]] = add nuw i64 [[IV]], 1 -; VF4IC4-NEXT: [[C_2:%.*]] = icmp eq i64 [[INC]], 128 -; VF4IC4-NEXT: br i1 [[C_2]], label [[E2]], label [[LOOP_HEADER]] ; VF4IC4: e1: -; VF4IC4-NEXT: [[P1:%.*]] = phi i64 [ 0, [[LOOP_HEADER]] ], [ 0, [[VECTOR_EARLY_EXIT]] ] -; VF4IC4-NEXT: ret i64 [[P1]] +; VF4IC4-NEXT: ret i64 0 ; VF4IC4: e2: -; VF4IC4-NEXT: [[P2:%.*]] = phi i64 [ 1, [[LOOP_LATCH]] ], [ 1, [[MIDDLE_BLOCK]] ] -; VF4IC4-NEXT: ret i64 [[P2]] +; VF4IC4-NEXT: ret i64 1 ; entry: %src = alloca [128 x i32] @@ -155,22 +141,8 @@ define i64 @same_exit_block_pre_inc_use1() { ; VF4IC4-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], [[TMP8]] ; VF4IC4-NEXT: [[TMP10:%.*]] = add i64 3, [[TMP9]] ; VF4IC4-NEXT: br label [[LOOP_END]] -; VF4IC4: scalar.ph: -; 
VF4IC4-NEXT: br label [[LOOP:%.*]] -; VF4IC4: loop: -; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]] -; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1 -; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]] -; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1 -; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; VF4IC4: loop.inc: -; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], 67 -; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; VF4IC4: loop.end: -; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i64 [ [[IV]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ] +; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ] ; VF4IC4-NEXT: ret i64 [[RETVAL]] ; entry: @@ -256,19 +228,8 @@ define ptr @same_exit_block_pre_inc_use1_ivptr() { ; VF4IC4-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], [[TMP6]] ; VF4IC4-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP7]] ; VF4IC4-NEXT: br label [[LOOP_END]] -; VF4IC4: scalar.ph: -; VF4IC4-NEXT: br label [[LOOP:%.*]] -; VF4IC4: loop: -; VF4IC4-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[P1]], [[SCALAR_PH:%.*]] ] -; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[PTR]], align 1 -; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], 72 -; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; VF4IC4: loop.inc: -; VF4IC4-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 1 -; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[PTR_NEXT]], [[PTREND]] -; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; VF4IC4: loop.end: -; VF4IC4-NEXT: [[RETVAL:%.*]] = 
phi ptr [ [[PTR]], [[LOOP]] ], [ [[PTREND]], [[LOOP_INC]] ], [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[TMP8]], [[VECTOR_EARLY_EXIT]] ] +; VF4IC4-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[TMP8]], [[VECTOR_EARLY_EXIT]] ] ; VF4IC4-NEXT: ret ptr [[RETVAL]] ; entry: @@ -360,22 +321,8 @@ define i64 @same_exit_block_post_inc_use() { ; VF4IC4-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], [[TMP8]] ; VF4IC4-NEXT: [[TMP10:%.*]] = add i64 3, [[TMP9]] ; VF4IC4-NEXT: br label [[LOOP_END]] -; VF4IC4: scalar.ph: -; VF4IC4-NEXT: br label [[LOOP:%.*]] -; VF4IC4: loop: -; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]] -; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1 -; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]] -; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1 -; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; VF4IC4: loop.inc: -; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], 67 -; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; VF4IC4: loop.end: -; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i64 [ [[IV]], [[LOOP]] ], [ [[IV_NEXT]], [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ] +; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ] ; VF4IC4-NEXT: ret i64 [[RETVAL]] ; entry: @@ -470,27 +417,11 @@ define i64 @diff_exit_block_pre_inc_use1() { ; VF4IC4-NEXT: [[TMP8:%.*]] = select i1 [[TMP32]], i64 [[TMP31]], i64 [[TMP29]] ; VF4IC4-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], [[TMP8]] ; VF4IC4-NEXT: [[TMP10:%.*]] = add i64 3, [[TMP9]] -; VF4IC4-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; VF4IC4: scalar.ph: ; VF4IC4-NEXT: br label [[LOOP:%.*]] -; VF4IC4: 
loop: -; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]] -; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1 -; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]] -; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1 -; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; VF4IC4: loop.inc: -; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], 67 -; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; VF4IC4: loop.early.exit: -; VF4IC4-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[IV]], [[LOOP]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ] -; VF4IC4-NEXT: ret i64 [[RETVAL1]] +; VF4IC4-NEXT: ret i64 [[TMP10]] ; VF4IC4: loop.end: -; VF4IC4-NEXT: [[RETVAL2:%.*]] = phi i64 [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ] -; VF4IC4-NEXT: ret i64 [[RETVAL2]] +; VF4IC4-NEXT: ret i64 67 ; entry: %p1 = alloca [1024 x i8] @@ -588,27 +519,11 @@ define i64 @diff_exit_block_post_inc_use1() { ; VF4IC4-NEXT: [[TMP8:%.*]] = select i1 [[TMP32]], i64 [[TMP31]], i64 [[TMP29]] ; VF4IC4-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], [[TMP8]] ; VF4IC4-NEXT: [[TMP10:%.*]] = add i64 3, [[TMP9]] -; VF4IC4-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; VF4IC4: scalar.ph: ; VF4IC4-NEXT: br label [[LOOP:%.*]] -; VF4IC4: loop: -; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]] -; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1 -; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]] -; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1 -; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; VF4IC4-NEXT: br i1 
[[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; VF4IC4: loop.inc: -; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], 67 -; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; VF4IC4: loop.early.exit: -; VF4IC4-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[IV]], [[LOOP]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ] -; VF4IC4-NEXT: ret i64 [[RETVAL1]] +; VF4IC4-NEXT: ret i64 [[TMP10]] ; VF4IC4: loop.end: -; VF4IC4-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[IV_NEXT]], [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ] -; VF4IC4-NEXT: ret i64 [[RETVAL2]] +; VF4IC4-NEXT: ret i64 67 ; entry: %p1 = alloca [1024 x i8] @@ -847,22 +762,8 @@ define i8 @same_exit_block_use_loaded_value() { ; VF4IC4-NEXT: [[TMP41:%.*]] = icmp uge i64 [[TMP8]], 12 ; VF4IC4-NEXT: [[TMP42:%.*]] = select i1 [[TMP41]], i8 [[TMP40]], i8 [[TMP38]] ; VF4IC4-NEXT: br label [[LOOP_END]] -; VF4IC4: scalar.ph: -; VF4IC4-NEXT: br label [[LOOP:%.*]] -; VF4IC4: loop: -; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ] -; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]] -; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1 -; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]] -; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1 -; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; VF4IC4: loop.inc: -; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP_END]], label [[LOOP]] ; VF4IC4: loop.end: -; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i8 [ [[LD1]], [[LOOP]] ], [ -1, [[LOOP_INC]] ], [ -1, [[MIDDLE_BLOCK]] ], [ [[TMP42]], [[VECTOR_EARLY_EXIT]] ] +; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i8 [ -1, [[MIDDLE_BLOCK]] ], [ [[TMP42]], [[VECTOR_EARLY_EXIT]] ] ; 
VF4IC4-NEXT: ret i8 [[RETVAL]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/single-value-blend-phis.ll b/llvm/test/Transforms/LoopVectorize/single-value-blend-phis.ll index 219c66f..3bb39b9 100644 --- a/llvm/test/Transforms/LoopVectorize/single-value-blend-phis.ll +++ b/llvm/test/Transforms/LoopVectorize/single-value-blend-phis.ll @@ -29,28 +29,7 @@ define void @single_incoming_phi_no_blend_mask(i64 %a, i64 %b) { ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i16 -; CHECK-NEXT: br label [[LOOP_COND:%.*]] -; CHECK: loop.cond: -; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ [[IV_TRUNC]], [[LOOP_HEADER]] ] -; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @src, i16 0, i16 [[BLEND]] -; CHECK-NEXT: [[LV:%.*]] = load i16, ptr [[SRC_PTR]], align 1 -; CHECK-NEXT: [[CMP_B:%.*]] = icmp sgt i64 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[CMP_B]], label [[LOOP_NEXT:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.next: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[RES:%.*]] = phi i16 [ [[LV]], [[LOOP_COND]] ], [ 1, [[LOOP_NEXT]] ] -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[IV]] -; CHECK-NEXT: store i16 [[RES]], ptr [[DST_PTR]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31 -; CHECK-NEXT: br i1 [[CMP439]], label [[LOOP_HEADER]], label [[EXIT]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -112,29 +91,7 @@ define void 
@single_incoming_phi_with_blend_mask(i64 %a, i64 %b) { ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i16 -; CHECK-NEXT: [[CMP_A:%.*]] = icmp ugt i64 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[CMP_A]], label [[LOOP_COND:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.cond: -; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ [[IV_TRUNC]], [[LOOP_HEADER]] ] -; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @src, i16 0, i16 [[BLEND]] -; CHECK-NEXT: [[LV:%.*]] = load i16, ptr [[SRC_PTR]], align 1 -; CHECK-NEXT: [[CMP_B:%.*]] = icmp sgt i64 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[CMP_B]], label [[LOOP_NEXT:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.next: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[RES:%.*]] = phi i16 [ 0, [[LOOP_HEADER]] ], [ [[LV]], [[LOOP_COND]] ], [ 1, [[LOOP_NEXT]] ] -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[IV]] -; CHECK-NEXT: store i16 [[RES]], ptr [[DST_PTR]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31 -; CHECK-NEXT: br i1 [[CMP439]], label [[LOOP_HEADER]], label [[EXIT]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -201,26 +158,7 @@ define void @multiple_incoming_phi_with_blend_mask(i64 %a, ptr noalias %dst) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label 
[[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i16 -; CHECK-NEXT: [[IV_TRUNC_2:%.*]] = trunc i64 [[IV]] to i16 -; CHECK-NEXT: [[CMP_A:%.*]] = icmp ugt i64 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[CMP_A]], label [[LOOP_NEXT:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.next: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ [[IV_TRUNC]], [[LOOP_HEADER]] ], [ [[IV_TRUNC_2]], [[LOOP_NEXT]] ] -; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @src, i16 0, i16 [[BLEND]] -; CHECK-NEXT: [[LV:%.*]] = load i16, ptr [[SRC_PTR]], align 1 -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i16 [[LV]], ptr [[DST_PTR]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31 -; CHECK-NEXT: br i1 [[CMP439]], label [[LOOP_HEADER]], label [[EXIT]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -297,29 +235,7 @@ define void @single_incoming_needs_predication(i64 %a, i64 %b) { ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64 ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i16 -; CHECK-NEXT: [[CMP_A:%.*]] = icmp ugt i64 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[CMP_A]], label [[LOOP_COND:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.cond: -; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ [[IV_TRUNC]], 
[[LOOP_HEADER]] ] -; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @src, i16 0, i16 [[BLEND]] -; CHECK-NEXT: [[LV:%.*]] = load i16, ptr [[SRC_PTR]], align 1 -; CHECK-NEXT: [[CMP_B:%.*]] = icmp sgt i64 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[CMP_B]], label [[LOOP_NEXT:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.next: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[RES:%.*]] = phi i16 [ 0, [[LOOP_HEADER]] ], [ [[LV]], [[LOOP_COND]] ], [ 1, [[LOOP_NEXT]] ] -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[IV]] -; CHECK-NEXT: store i16 [[RES]], ptr [[DST_PTR]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 63 -; CHECK-NEXT: br i1 [[CMP439]], label [[LOOP_HEADER]], label [[EXIT]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -371,20 +287,7 @@ define void @duplicated_incoming_blocks_blend(i32 %x, ptr %ptr) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[ADD_I:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[C_0:%.*]] = icmp ugt i32 [[IV]], [[X:%.*]] -; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[P:%.*]] = phi i32 [ [[IV]], [[LOOP_HEADER]] ], [ [[IV]], [[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_PTR:%.*]] = getelementptr i32, ptr [[PTR]], i32 [[P]] -; CHECK-NEXT: store i32 [[P]], ptr [[GEP_PTR]], align 4 -; CHECK-NEXT: [[ADD_I]] = add nsw i32 [[P]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[ADD_I]], 1000 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_HEADER]], label [[EXIT]] ; CHECK: 
exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/single_early_exit.ll b/llvm/test/Transforms/LoopVectorize/single_early_exit.ll index 04f04a8..3500c5c 100644 --- a/llvm/test/Transforms/LoopVectorize/single_early_exit.ll +++ b/llvm/test/Transforms/LoopVectorize/single_early_exit.ll @@ -34,22 +34,8 @@ define i64 @same_exit_block_phi_of_consts() { ; CHECK-NEXT: br label [[LOOP_END:%.*]] ; CHECK: vector.early.exit: ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 0, [[LOOP]] ], [ 1, [[LOOP_INC]] ], [ 1, [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 1, [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -108,21 +94,7 @@ define i64 @diff_exit_block_phi_of_consts() { ; CHECK: middle.block: ; CHECK-NEXT: br label [[LOOP_END:%.*]] ; CHECK: vector.early.exit: -; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: 
[[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.early.exit: ; CHECK-NEXT: ret i64 0 ; CHECK: loop.end: @@ -292,16 +264,7 @@ define i32 @diff_blocks_invariant_early_exit_cond(ptr %s) { ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: vector.early.exit: -; CHECK-NEXT: br label [[EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IND:%.*]] = phi i32 [ -10, [[SCALAR_PH:%.*]] ], [ [[IND_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-NEXT: br i1 [[COND]], label [[FOR_INC]], label [[EARLY_EXIT]] -; CHECK: for.inc: -; CHECK-NEXT: [[IND_NEXT]] = add nsw i32 [[IND]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IND_NEXT]], 266 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: early.exit: ; CHECK-NEXT: tail call void @abort() ; CHECK-NEXT: unreachable diff --git a/llvm/test/Transforms/LoopVectorize/single_early_exit_live_outs.ll b/llvm/test/Transforms/LoopVectorize/single_early_exit_live_outs.ll index 54408b2..79821b8 100644 --- a/llvm/test/Transforms/LoopVectorize/single_early_exit_live_outs.ll +++ b/llvm/test/Transforms/LoopVectorize/single_early_exit_live_outs.ll @@ -36,22 +36,8 @@ define i64 @same_exit_block_pre_inc_use1() { ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] ; 
CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -116,24 +102,8 @@ define i32 @same_exit_block_pre_inc_use1_iv64_endi32_step2() { ; CHECK-NEXT: [[TMP11:%.*]] = mul i32 [[DOTCAST]], 2 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i32 9, [[TMP11]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[INDEX2:%.*]] = phi i32 [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ 9, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr 
[[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[INDEX2_NEXT]] = add i32 [[INDEX2]], 2 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ [[INDEX2]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i32 [[RETVAL]] ; entry: @@ -197,23 +167,8 @@ define i32 @same_exit_block_pre_inc_use1_iv128_endi32_step2() { ; CHECK-NEXT: [[TMP10:%.*]] = mul i32 [[DOTCAST]], 2 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i32 9, [[TMP10]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i128 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[INDEX2:%.*]] = phi i32 [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ 9, [[SCALAR_PH]] ] -; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP_INC]] ], [ [[P1]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[PTR]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], 3 -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i128 [[INDEX]], 1 -; CHECK-NEXT: [[INDEX2_NEXT]] = add i32 [[INDEX2]], 2 -; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i128 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ [[INDEX2]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 
67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i32 [[RETVAL]] ; entry: @@ -277,24 +232,8 @@ define float @same_exit_block_pre_inc_use1_iv64_endf32() { ; CHECK-NEXT: [[TMP11:%.*]] = fmul fast float 1.000000e+00, [[DOTCAST]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = fadd fast float 9.000000e+00, [[TMP11]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[INDEX2:%.*]] = phi float [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ 9.000000e+00, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[INDEX2_NEXT]] = fadd fast float [[INDEX2]], 1.000000e+00 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi float [ [[INDEX2]], [[LOOP]] ], [ 1.230000e+02, [[LOOP_INC]] ], [ 1.230000e+02, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi float [ 1.230000e+02, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret float [[RETVAL]] ; entry: @@ -360,24 +299,8 @@ define ptr @same_exit_block_pre_inc_use1_iv64_endptr() { ; CHECK-NEXT: [[TMP20:%.*]] = mul i64 
[[TMP19]], 5 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP20]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[INDEX2:%.*]] = phi ptr [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ [[P2]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[INDEX2_NEXT]] = getelementptr i8, ptr [[INDEX2]], i64 5 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[INDEX2]], [[LOOP]] ], [ [[P1]], [[LOOP_INC]] ], [ [[P1]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[P1]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret ptr [[RETVAL]] ; entry: @@ -438,19 +361,8 @@ define ptr @same_exit_block_pre_inc_use1_ivptr() { ; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP8]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[P1]], [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[PTR]], align 1 -; CHECK-NEXT: 
[[CMP3:%.*]] = icmp eq i8 [[LD1]], 72 -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[PTR_NEXT]], [[PTREND]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTR]], [[LOOP]] ], [ [[PTREND]], [[LOOP_INC]] ], [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret ptr [[RETVAL]] ; entry: @@ -512,23 +424,8 @@ define i64 @same_exit_block_pre_inc1_use_inv_cond(i1 %cond) { ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP11]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: [[CMP4:%.*]] = select i1 [[COND]], i1 [[CMP3]], i1 false -; CHECK-NEXT: br i1 [[CMP4]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ 
[[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -592,22 +489,8 @@ define i64 @same_exit_block_pre_inc_use1_gep_two_indices() { ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i8], ptr [[P1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], ptr [[P2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -670,22 +553,8 @@ define i64 @same_exit_block_pre_inc_use1_alloca_diff_type() { ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = 
phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -745,22 +614,8 @@ define i64 @same_exit_block_pre_inc_use2() { ; CHECK-NEXT: br label [[LOOP_END:%.*]] ; CHECK: vector.early.exit: ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 
[[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[LOOP]] ], [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ], [ 67, [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 66, [[MIDDLE_BLOCK]] ], [ 67, [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -823,22 +678,8 @@ define i64 @same_exit_block_pre_inc_use3() { ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[INDEX_LCSSA:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ [[INDEX]], [[LOOP]] ], [ 66, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[INDEX_LCSSA:%.*]] = phi i64 [ 66, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[INDEX_LCSSA]] ; entry: @@ -902,20 +743,8 @@ define i64 @same_exit_block_pre_inc_use4() { ; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP8]] ; CHECK-NEXT: br label [[LOOP_END]] -; 
CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i64, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i64 [[INDEX]], [[LD1]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -976,22 +805,8 @@ define i64 @same_exit_block_post_inc_use() { ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; 
CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[INDEX_NEXT]], [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -1051,19 +866,8 @@ define ptr @same_exit_block_post_inc_use1_ivptr() { ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 1 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP9]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[P1]], [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[PTR]], align 1 -; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], 72 -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[PTR_NEXT]], [[PTREND]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTR_NEXT]], [[LOOP]] ], [ [[PTREND]], [[LOOP_INC]] ], [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret ptr [[RETVAL]] ; entry: @@ -1123,22 +927,8 @@ define i64 @same_exit_block_post_inc_use2() { ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 1 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP11]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, 
[[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX_NEXT]], [[LOOP]] ], [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 66, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -1200,27 +990,11 @@ define i64 @diff_exit_block_pre_inc_use1() { ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP6]], i1 true) ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] -; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; 
CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.early.exit: -; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: ret i64 [[RETVAL1]] +; CHECK-NEXT: ret i64 [[EARLY_EXIT_VALUE]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RETVAL2]] +; CHECK-NEXT: ret i64 67 ; entry: %p1 = alloca [1024 x i8] @@ -1282,27 +1056,11 @@ define i64 @diff_exit_block_pre_inc_use2() { ; CHECK: middle.block: ; CHECK-NEXT: br label [[LOOP_END:%.*]] ; CHECK: vector.early.exit: -; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.early.exit: -; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ 67, [[LOOP]] ], [ 67, [[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: ret i64 [[RETVAL1]] +; CHECK-NEXT: ret i64 67 ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RETVAL2]] +; CHECK-NEXT: 
ret i64 66 ; entry: %p1 = alloca [1024 x i8] @@ -1367,27 +1125,11 @@ define i64 @diff_exit_block_pre_inc_use3() { ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP6]], i1 true) ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX2]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] -; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.early.exit: -; CHECK-NEXT: [[INDEX_LCSSA:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: ret i64 [[INDEX_LCSSA]] +; CHECK-NEXT: ret i64 [[EARLY_EXIT_VALUE]] ; CHECK: loop.end: -; CHECK-NEXT: [[INDEX_LCSSA1:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[INDEX_LCSSA1]] +; CHECK-NEXT: ret i64 66 ; entry: %p1 = alloca [1024 x i8] @@ -1450,27 +1192,11 @@ define i64 @diff_exit_block_post_inc_use1() { ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP13]], i1 true) ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, 
[[TMP10]] -; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.early.exit: -; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: ret i64 [[RETVAL1]] +; CHECK-NEXT: ret i64 [[EARLY_EXIT_VALUE]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX_NEXT]], [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RETVAL2]] +; CHECK-NEXT: ret i64 67 ; entry: %p1 = alloca [1024 x i8] @@ -1536,27 +1262,11 @@ define i64 @diff_exit_block_post_inc_use2() { ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 1 ; CHECK-NEXT: [[TMP21:%.*]] = add i64 3, [[TMP11]] -; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; 
CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; CHECK: loop.inc: -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.early.exit: -; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX_NEXT]], [[LOOP]] ], [ [[TMP21]], [[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: ret i64 [[RETVAL1]] +; CHECK-NEXT: ret i64 [[TMP21]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RETVAL2]] +; CHECK-NEXT: ret i64 66 ; entry: %p1 = alloca [1024 x i8] @@ -1624,29 +1334,11 @@ define i64 @diff_exit_block_post_inc_use3(i64 %start) { ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], 1 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 [[START]], [[TMP12]] -; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ [[START]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[INDEX2_NEXT]] = add i64 [[INDEX2]], 1 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label 
[[LOOP_EARLY_EXIT]] -; CHECK: loop.inc: -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.early.exit: -; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX2_NEXT]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: ret i64 [[RETVAL1]] +; CHECK-NEXT: ret i64 [[EARLY_EXIT_VALUE]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX2]], [[LOOP_INC]] ], [ [[IND_ESCAPE]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RETVAL2]] +; CHECK-NEXT: ret i64 [[IND_ESCAPE]] ; entry: %p1 = alloca [1024 x i8] @@ -1713,21 +1405,8 @@ define i64 @loop_contains_safe_call() { ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP9]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load float, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[SQRT:%.*]] = tail call fast float @llvm.sqrt.f32(float [[LD1]]) -; CHECK-NEXT: [[CMP:%.*]] = fcmp fast ult float [[SQRT]], 3.000000e+00 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -1788,21 +1467,8 @@ define 
i64 @loop_contains_safe_div() { ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP9]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[LD1]], 20000 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[DIV]], 1 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -1864,22 +1530,8 @@ define i64 @loop_contains_load_after_early_exit(ptr dereferenceable(1024) align( ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP11]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[LD1]], 1 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: 
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i64, ptr [[ARRAYIDX2]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[LD2]], [[LOOP_INC]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -2071,22 +1723,8 @@ define i64 @same_exit_block_pre_inc_use1_deref_ptrs(ptr dereferenceable(1024) %p ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: 
[[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll b/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll index 66300ed..19ab96d 100644 --- a/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll +++ b/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll @@ -41,18 +41,7 @@ define void @pr75298_store_reduction_value_in_folded_loop(i64 %iv.start) optsize ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: store i32 [[TMP6]], ptr @a, align 4 -; CHECK-NEXT: br label [[EXIT_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_START]], [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr @c, align 4 -; CHECK-NEXT: [[RED_NEXT]] = xor i32 [[RED]], [[L]] -; CHECK-NEXT: store i32 [[RED_NEXT]], ptr @a, align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 7 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT_LOOPEXIT]], label [[LOOP]] ; CHECK: exit.loopexit: ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: diff --git a/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll index 7027d85..ca32808 100644 --- a/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll +++ b/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll @@ -23,19 +23,9 @@ define float @pr70988() { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT3]], 1022 ; CHECK-NEXT: br i1 [[TMP6]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], 1.000000e+00 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw nsw i32 [[INDEX]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[INDEX_NEXT]], 1021 -; CHECK-NEXT: br i1 [[COND]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[RDX_NEXT]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[DOTLCSSA]] +; CHECK-NEXT: ret float [[TMP5]] ; ; CHECK-ALM-LABEL: define float @pr70988() { ; CHECK-ALM-NEXT: entry: @@ -56,19 +46,9 @@ define float @pr70988() { ; CHECK-ALM-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT3]], 1022 ; CHECK-ALM-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-ALM: middle.block: -; CHECK-ALM-NEXT: br label [[EXIT:%.*]] -; CHECK-ALM: scalar.ph: ; CHECK-ALM-NEXT: br label [[LOOP:%.*]] -; CHECK-ALM: loop: -; CHECK-ALM-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], 1.000000e+00 -; CHECK-ALM-NEXT: [[INDEX_NEXT]] = add nuw nsw i32 [[INDEX]], 1 -; CHECK-ALM-NEXT: [[COND:%.*]] = icmp ult i32 [[INDEX_NEXT]], 1021 -; CHECK-ALM-NEXT: br i1 [[COND]], label [[LOOP]], label [[EXIT]] ; CHECK-ALM: exit: -; CHECK-ALM-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[RDX_NEXT]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] -; CHECK-ALM-NEXT: ret float [[DOTLCSSA]] +; CHECK-ALM-NEXT: ret float [[TMP5]] ; 
entry: br label %loop @@ -123,21 +103,9 @@ define float @pr72720reduction_using_active_lane_mask(ptr %src) { ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[NARROW:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[NARROW]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP]], align 4 -; CHECK-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], [[L]] -; CHECK-NEXT: [[EC:%.*]] = icmp ult i32 [[NARROW]], 15 -; CHECK-NEXT: br i1 [[EC]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[RDX_NEXT]], [[LOOP]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[DOTLCSSA]] +; CHECK-NEXT: ret float [[TMP13]] ; ; CHECK-ALM-LABEL: define float @pr72720reduction_using_active_lane_mask( ; CHECK-ALM-SAME: ptr [[SRC:%.*]]) { @@ -173,21 +141,9 @@ define float @pr72720reduction_using_active_lane_mask(ptr %src) { ; CHECK-ALM-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16 ; CHECK-ALM-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-ALM: middle.block: -; CHECK-ALM-NEXT: br label [[EXIT:%.*]] -; CHECK-ALM: scalar.ph: ; CHECK-ALM-NEXT: br label [[LOOP:%.*]] -; CHECK-ALM: loop: -; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[NARROW:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[NARROW]] = add nuw nsw i32 [[IV]], 1 -; CHECK-ALM-NEXT: [[GEP:%.*]] = 
getelementptr float, ptr [[SRC]], i32 [[IV]] -; CHECK-ALM-NEXT: [[L:%.*]] = load float, ptr [[GEP]], align 4 -; CHECK-ALM-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], [[L]] -; CHECK-ALM-NEXT: [[EC:%.*]] = icmp ult i32 [[NARROW]], 15 -; CHECK-ALM-NEXT: br i1 [[EC]], label [[LOOP]], label [[EXIT]] ; CHECK-ALM: exit: -; CHECK-ALM-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[RDX_NEXT]], [[LOOP]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] -; CHECK-ALM-NEXT: ret float [[DOTLCSSA]] +; CHECK-ALM-NEXT: ret float [[TMP11]] ; entry: br label %loop @@ -229,19 +185,9 @@ define float @fadd_reduction_with_live_in(float %inc) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1002 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SUM_NEXT]] = fadd float [[SUM]], [[INC]] -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[LCSSA:%.*]] = phi float [ [[SUM_NEXT]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[LCSSA]] +; CHECK-NEXT: ret float [[TMP5]] ; ; CHECK-ALM-LABEL: define float @fadd_reduction_with_live_in( ; CHECK-ALM-SAME: float [[INC:%.*]]) { @@ -263,19 +209,9 @@ define float @fadd_reduction_with_live_in(float %inc) { ; CHECK-ALM-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1002 ; CHECK-ALM-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-ALM: middle.block: -; CHECK-ALM-NEXT: br label [[EXIT:%.*]] -; CHECK-ALM: scalar.ph: ; CHECK-ALM-NEXT: br label 
[[LOOP:%.*]] -; CHECK-ALM: loop: -; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[SUM:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[SUM_NEXT]] = fadd float [[SUM]], [[INC]] -; CHECK-ALM-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-ALM-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000 -; CHECK-ALM-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK-ALM: exit: -; CHECK-ALM-NEXT: [[LCSSA:%.*]] = phi float [ [[SUM_NEXT]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] -; CHECK-ALM-NEXT: ret float [[LCSSA]] +; CHECK-ALM-NEXT: ret float [[TMP5]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/strided-accesses-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/strided-accesses-interleave-only.ll index 97f686c..dcab18f 100644 --- a/llvm/test/Transforms/LoopVectorize/strided-accesses-interleave-only.ll +++ b/llvm/test/Transforms/LoopVectorize/strided-accesses-interleave-only.ll @@ -22,16 +22,6 @@ define void @test_variable_stride(ptr %dst, i32 %scale) { ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IDX:%.*]] = mul i32 [[IV]], [[SCALE]] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[DST]], i32 [[IDX]] -; CHECK-NEXT: store i32 [[IV]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll index 
87eebb7b..a852b73 100644 --- a/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll +++ b/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll @@ -54,16 +54,6 @@ define i32 @test(ptr %vf1, i64 %n) { ; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[TMP18:%.*]] = alloca i8, i64 [[N]], align 16 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[VF1]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store ptr [[TMP18]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV]], 200 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret i32 0 ; diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll index 4bc4e54..00e04c7 100644 --- a/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll +++ b/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll @@ -34,15 +34,6 @@ define void @canonical_small_tc_i8(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: 
store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 15 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -94,15 +85,6 @@ define void @canonical_upper_limit_i8(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 255 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -154,15 +136,6 @@ define void @canonical_lower_limit_i16(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 257 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -214,15 +187,6 @@ define void @canonical_upper_limit_i16(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], 
label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 65535 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -274,15 +238,6 @@ define void @canonical_lower_limit_i32(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 65537 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -334,15 +289,6 @@ define void @canonical_upper_limit_i32(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr 
[[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 4294967295 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -394,15 +340,6 @@ define void @canonical_lower_limit_i64(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 4294967297 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -454,15 +391,6 @@ define void @canonical_upper_limit_i64(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], -1 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -514,15 +442,6 @@ define void @canonical_lower_limit_i128(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], 
label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i256 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i256 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i256 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i256 [[IV_NEXT]], 18446744073709551617 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll index 6fd7c70..b6f43aa 100644 --- a/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll +++ b/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll @@ -55,22 +55,6 @@ define void @tail_fold_switch(ptr %dst, i32 %0) { ; CHECK-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: switch i32 [[TMP0]], label %[[LOOP_LATCH]] [ -; CHECK-NEXT: i32 0, label %[[LOOP_LATCH]] -; CHECK-NEXT: i32 1, label %[[IF_THEN:.*]] -; CHECK-NEXT: ] -; CHECK: [[IF_THEN]]: -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 4 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git 
a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll index 45c56a0..3bc5da1 100644 --- a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll +++ b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll @@ -53,18 +53,9 @@ define void @VF1-VPlanExe(ptr %dst) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store i32 0, ptr [[DST_PTR]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; entry: br label %for.body @@ -132,17 +123,9 @@ define void @VF1-VPWidenCanonicalIVRecipeExe(ptr %ptr1) { ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[PTR1]], [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: store double 0.000000e+00, ptr [[ADDR]], align 8 -; CHECK-NEXT: [[PTR]] = getelementptr inbounds double, ptr [[ADDR]], i64 1 -; CHECK-NEXT: 
[[COND:%.*]] = icmp eq ptr [[PTR]], [[PTR2]] -; CHECK-NEXT: br i1 [[COND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; entry: %ptr2 = getelementptr inbounds double, ptr %ptr1, i64 15 diff --git a/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll b/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll index 387a02e..8a16293 100644 --- a/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll +++ b/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll @@ -133,26 +133,7 @@ define void @ext_cmp(ptr %src.1, ptr %src.2, ptr noalias %dst) { ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds i16, ptr [[SRC_1]], i64 [[IV]] -; CHECK-NEXT: [[I2:%.*]] = load i16, ptr [[GEP_SRC_1]], align 2 -; CHECK-NEXT: [[I3:%.*]] = sext i16 [[I2]] to i32 -; CHECK-NEXT: [[C_1:%.*]] = icmp sgt i32 0, [[I3]] -; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr inbounds i8, ptr [[SRC_2]], i64 [[IV]] -; CHECK-NEXT: [[I4:%.*]] = load i8, ptr [[GEP_SRC_2]], align 2 -; CHECK-NEXT: [[I5:%.*]] = zext i8 [[I4]] to i32 -; CHECK-NEXT: [[I6:%.*]] = select i1 [[C_1]], i32 0, i32 [[I5]] -; CHECK-NEXT: [[I7:%.*]] = and i32 [[I6]], 0 -; CHECK-NEXT: [[I8:%.*]] = trunc nuw nsw i32 [[I7]] to i16 -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i16 [[I8]], ptr [[GEP_DST]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git 
a/llvm/test/Transforms/LoopVectorize/trunc-loads-p16.ll b/llvm/test/Transforms/LoopVectorize/trunc-loads-p16.ll index 83ecf1a..6e7cdba 100644 --- a/llvm/test/Transforms/LoopVectorize/trunc-loads-p16.ll +++ b/llvm/test/Transforms/LoopVectorize/trunc-loads-p16.ll @@ -26,21 +26,7 @@ define void @pr77468(ptr noalias %src, ptr noalias %dst, i1 %x) { ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i32, ptr [[SRC]], i16 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 1 -; CHECK-NEXT: [[X_EXT:%.*]] = zext i1 [[X]] to i32 -; CHECK-NEXT: [[AND:%.*]] = and i32 [[X_EXT]], [[L]] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i16 [[IV]] -; CHECK-NEXT: [[T:%.*]] = trunc i32 [[AND]] to i16 -; CHECK-NEXT: store i16 [[T]], ptr [[GEP_DST]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i16 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll b/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll index 2f5f157..2aebb73 100644 --- a/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll @@ -18,11 +18,7 @@ define i8 @reduction_and_trunc(ptr noalias nocapture %ptr) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: 
scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[AND_LCSSA_OFF0:%.*]] = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> [[TMP2]]) ; CHECK-NEXT: ret i8 [[AND_LCSSA_OFF0]] @@ -64,11 +60,7 @@ define i16 @reduction_or_trunc(ptr noalias nocapture %ptr) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[XOR_LCSSA_OFF0:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP2]]) ; CHECK-NEXT: ret i16 [[XOR_LCSSA_OFF0]] @@ -110,11 +102,7 @@ define i16 @reduction_xor_trunc(ptr noalias nocapture %ptr) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[XOR_LCSSA_OFF0:%.*]] = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> [[TMP2]]) ; CHECK-NEXT: ret i16 [[XOR_LCSSA_OFF0]] diff --git a/llvm/test/Transforms/LoopVectorize/trunc-shifts.ll b/llvm/test/Transforms/LoopVectorize/trunc-shifts.ll index 4a372b5..498c58d 100644 --- a/llvm/test/Transforms/LoopVectorize/trunc-shifts.ll +++ b/llvm/test/Transforms/LoopVectorize/trunc-shifts.ll @@ -24,20 +24,7 @@ define void @test_pr47927_lshr_const_shift_ops(ptr %dst, i32 %f) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], 
label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[L:%.*]] = lshr i32 [[F]], 18 -; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8 -; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]] -; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100 -; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -81,20 +68,7 @@ define void @test_shl_const_shift_ops(ptr %dst, i32 %f) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[L:%.*]] = shl i32 [[F]], 18 -; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8 -; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]] -; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100 -; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -138,20 +112,7 @@ define void @test_ashr_const_shift_ops(ptr %dst, i32 %f) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 
[[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[L:%.*]] = ashr i32 [[F]], 18 -; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8 -; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]] -; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100 -; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -195,22 +156,7 @@ define void @test_shl_const_shifted_op(ptr %dst, i32 %f) { ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]] -; CHECK-NEXT: [[LV:%.*]] = load i8, ptr [[GEP]], align 1 -; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[LV]] to i32 -; CHECK-NEXT: [[L:%.*]] = shl i32 19, [[ZEXT]] -; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8 -; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100 -; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -257,22 
+203,7 @@ define void @test_lshr_by_18(ptr %A) { ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV_EXT]] -; CHECK-NEXT: [[LV:%.*]] = load i8, ptr [[GEP]], align 1 -; CHECK-NEXT: [[LV_EXT:%.*]] = zext i8 [[LV]] to i32 -; CHECK-NEXT: [[L:%.*]] = lshr i32 [[LV_EXT]], 18 -; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8 -; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100 -; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -318,22 +249,7 @@ define void @test_lshr_by_4(ptr %A) { ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV_EXT]] -; CHECK-NEXT: [[LV:%.*]] = load i8, ptr [[GEP]], align 1 -; CHECK-NEXT: [[LV_EXT:%.*]] = zext i8 [[LV]] to i32 -; CHECK-NEXT: [[L:%.*]] = lshr i32 [[LV_EXT]], 4 -; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8 -; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i8 
[[IV]], 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100 -; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/uitofp-preserve-nneg.ll b/llvm/test/Transforms/LoopVectorize/uitofp-preserve-nneg.ll index d6273e0..b85f274 100644 --- a/llvm/test/Transforms/LoopVectorize/uitofp-preserve-nneg.ll +++ b/llvm/test/Transforms/LoopVectorize/uitofp-preserve-nneg.ll @@ -22,19 +22,7 @@ define void @uitofp_preserve_nneg(ptr %result, i32 %size, float %y) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[TMP4:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER4:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[CONV:%.*]] = uitofp nneg i32 [[TMP4]] to float -; CHECK-NEXT: [[TMP5:%.*]] = fmul float [[CONV]], [[Y]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = zext nneg i32 [[TMP4]] to i64 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[RESULT]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store float [[TMP5]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[TMP4]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], 256 -; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_EXIT]] ; CHECK: for.exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll index ccb301f..985a9a2 100644 --- a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll +++ b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll @@ -21,21 +21,6 @@ define void @blend_uniform_iv_trunc(i1 %c) { ; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label 
%[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[IV_TRUNC_2:%.*]] = trunc i64 [[IV]] to i16 -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_NEXT:.*]], label %[[LOOP_LATCH]] -; CHECK: [[LOOP_NEXT]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ poison, %[[LOOP_HEADER]] ], [ [[IV_TRUNC_2]], %[[LOOP_NEXT]] ] -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i16 [[BLEND]] -; CHECK-NEXT: store i16 0, ptr [[DST_PTR]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31 -; CHECK-NEXT: br i1 [[CMP439]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -79,20 +64,6 @@ define void @blend_uniform_iv(i1 %c) { ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_NEXT:.*]], label %[[LOOP_LATCH]] -; CHECK: [[LOOP_NEXT]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[BLEND:%.*]] = phi i64 [ poison, %[[LOOP_HEADER]] ], [ [[IV]], %[[LOOP_NEXT]] ] -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[BLEND]] -; CHECK-NEXT: store i16 0, ptr [[DST_PTR]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31 -; CHECK-NEXT: br i1 
[[CMP439]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -150,25 +121,6 @@ define void @blend_chain_iv(i1 %c) { ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_NEXT:.*]], label %[[LOOP_LATCH]] -; CHECK: [[LOOP_NEXT]]: -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_NEXT_2:.*]], label %[[LOOP_NEXT_3:.*]] -; CHECK: [[LOOP_NEXT_2]]: -; CHECK-NEXT: br label %[[LOOP_NEXT_3]] -; CHECK: [[LOOP_NEXT_3]]: -; CHECK-NEXT: [[BLEND_1:%.*]] = phi i64 [ undef, %[[LOOP_NEXT]] ], [ [[IV]], %[[LOOP_NEXT_2]] ] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[BLEND:%.*]] = phi i64 [ undef, %[[LOOP_HEADER]] ], [ [[BLEND_1]], %[[LOOP_NEXT_3]] ] -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[BLEND]] -; CHECK-NEXT: store i16 0, ptr [[DST_PTR]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31 -; CHECK-NEXT: br i1 [[CMP439]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -275,22 +227,6 @@ define void @redundant_branch_and_blends_without_mask(ptr %A) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_IV:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_IV]], align 4 -; CHECK-NEXT: 
[[ADD:%.*]] = add i32 [[L]], 10 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[P_1:%.*]] = phi i32 [ [[L]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[P_2:%.*]] = phi i32 [ [[ADD]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[RES:%.*]] = add i32 [[P_1]], [[P_2]] -; CHECK-NEXT: store i32 [[RES]], ptr [[GEP_IV]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll index 2c49fda..571c55c 100644 --- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll +++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll @@ -24,7 +24,8 @@ define void @ld_div1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -64,10 +65,11 @@ define void @ld_div2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -112,10 +114,11 @@ define void @ld_div3_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = 
add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -167,10 +170,11 @@ define void @ld_div1_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -214,10 +218,11 @@ define void @ld_div2_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store i64 [[TMP8]], ptr [[TMP6]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -269,10 +274,11 @@ define void @ld_div3_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: 
[[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -324,7 +330,7 @@ define void @ld_div1_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -379,7 +385,7 @@ define void @ld_div2_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -426,7 +432,7 @@ define void @ld_div3_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store i64 [[TMP8]], ptr [[TMP6]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP9]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -467,7 +473,7 @@ define void @ld_div1_step1_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store <2 x i64> [[TMP2]], ptr [[TMP3]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -516,7 +522,7 @@ define void @ld_div2_step1_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -565,7 +571,7 @@ define void @ld_div3_step1_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br 
label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -621,7 +627,7 @@ define void @ld_div1_step2_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -669,7 +675,7 @@ define void @ld_div2_step2_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store i64 [[TMP9]], ptr [[TMP7]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -725,7 +731,7 @@ define void @ld_div3_step2_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -781,7 +787,7 @@ define void @ld_div1_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 
x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -837,7 +843,7 @@ define void @ld_div2_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -885,7 +891,7 @@ define void @ld_div3_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store i64 [[TMP9]], ptr [[TMP7]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -933,10 +939,11 @@ define void @test_step_is_not_invariant(ptr %A) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2) ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], 56 -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll index c7525fb..6cf82fc 100644 --- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll +++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll @@ -24,7 +24,8 @@ define void @ld_and_neg1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -64,10 +65,11 @@ define void @ld_and_neg2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -112,10 +114,11 @@ define void @ld_and_neg3_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], 
label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -167,10 +170,11 @@ define void @ld_and_neg1_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -212,10 +216,11 @@ define void @ld_and_neg2_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store i64 [[TMP5]], ptr [[TMP7]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -267,7 +272,7 @@ define void @ld_and_neg1_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -322,7 +327,7 @@ define void @ld_and_neg2_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -371,7 +376,7 @@ define void @ld_and_neg2_step1_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -427,7 +432,7 @@ define void @ld_and_neg2_step2_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -483,7 +488,7 @@ define void @ld_and_neg2_step3_start1_ind1(ptr 
noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -539,7 +544,7 @@ define void @ld_and_neg3_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_div_urem.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_div_urem.ll index 27cefa2..9ed2240 100644 --- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_div_urem.ll +++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_div_urem.ll @@ -58,7 +58,8 @@ define void @ld_div2_urem3_1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: br i1 [[TMP36]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -130,10 +131,11 @@ define void @ld_div2_urem3_2(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[VEC_IND_NEXT]] 
= add <8 x i64> [[VEC_IND]], splat (i64 8) ; CHECK-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -203,10 +205,11 @@ define void @ld_div4(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[VEC_IND]], splat (i64 8) ; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -247,10 +250,11 @@ define void @ld_div8_urem3(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store <8 x i64> [[BROADCAST_SPLAT]], ptr [[TMP5]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_lshr.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_lshr.ll index cee53b5..2b5d0f3 100644 --- 
a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_lshr.ll +++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_lshr.ll @@ -25,7 +25,8 @@ define void @ld_lshr0_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_lshr0_step1_start0_ind1 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -46,7 +47,8 @@ define void @ld_lshr0_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -86,10 +88,11 @@ define void @ld_lshr1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF2-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF2-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_lshr1_step1_start0_ind1 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -123,10 +126,11 @@ define void @ld_lshr1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; VF4-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF4-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP4:![0-9]+]] +; VF4-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -166,10 +170,11 @@ define void @ld_lshr2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF2-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; VF2-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_lshr2_step1_start0_ind1 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -189,10 +194,11 @@ define void @ld_lshr2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF4-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; VF4-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -244,10 +250,11 @@ define void @ld_lshr0_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; VF2-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP8:![0-9]+]] +; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_lshr0_step2_start0_ind1 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -296,10 +303,11 @@ define void @ld_lshr0_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8) ; VF4-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -343,10 +351,11 @@ define void @ld_lshr1_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: store i64 [[TMP8]], ptr [[TMP6]], align 8 ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF2-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; VF2-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_lshr1_step2_start0_ind1 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -379,10 +388,11 @@ define void @ld_lshr1_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: store i64 [[TMP14]], ptr [[TMP10]], align 8 ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF4-NEXT: br i1 
[[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; VF4-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -434,7 +444,7 @@ define void @ld_lshr0_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -486,7 +496,7 @@ define void @ld_lshr0_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -541,7 +551,7 @@ define void @ld_lshr1_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; 
VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -593,7 +603,7 @@ define void @ld_lshr1_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -643,7 +653,7 @@ define void @ld_lshr1_step1_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; VF2-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; VF2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; VF2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -681,7 +691,7 @@ define void @ld_lshr1_step1_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; VF4-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 996 -; VF4-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; VF4-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -729,7 +739,7 @@ define void @ld_lshr1_step2_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: store i64 [[TMP9]], ptr [[TMP7]], align 8 ; VF2-NEXT: [[INDEX_NEXT]] = add 
nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; VF2-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; VF2-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -766,7 +776,7 @@ define void @ld_lshr1_step2_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: store i64 [[TMP15]], ptr [[TMP11]], align 8 ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 496 -; VF4-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; VF4-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -822,7 +832,7 @@ define void @ld_lshr1_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; VF2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -875,7 +885,7 @@ define void @ld_lshr1_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; VF4-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP15:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -931,7 +941,7 @@ define void @ld_lshr2_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; VF2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -984,7 +994,7 @@ define void @ld_lshr2_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; VF4-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll index 0f8289d..12851d7 100644 --- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll +++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll @@ -35,7 +35,8 @@ define void @ld_div1_step1_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_div1_step1_start0_ind2 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr 
noalias [[B:%.*]]) { @@ -76,7 +77,8 @@ define void @ld_div1_step1_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -121,10 +123,11 @@ define void @ld_div2_step1_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP5]], align 8 ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF2-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF2-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_div2_step1_start0_ind2 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -162,10 +165,11 @@ define void @ld_div2_step1_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -218,10 +222,11 @@ define void @ld_div3_step1_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: 
[[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_div3_step1_start0_ind2 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -259,10 +264,11 @@ define void @ld_div3_step1_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -322,10 +328,11 @@ define void @ld_div1_step2_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_div1_step2_start0_ind2 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -378,10 +385,11 @@ define void @ld_div1_step2_start0_ind2(ptr noalias %A, ptr noalias %B) { ; 
VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -441,10 +449,11 @@ define void @ld_div2_step2_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_div2_step2_start0_ind2 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -497,10 +506,11 @@ define void @ld_div2_step2_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -560,10 +570,11 @@ define void 
@ld_div3_step2_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_div3_step2_start0_ind2 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -616,10 +627,11 @@ define void @ld_div3_step2_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -679,7 +691,7 @@ define void @ld_div1_step3_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -735,7 +747,7 @@ define void 
@ld_div1_step3_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -798,7 +810,7 @@ define void @ld_div2_step3_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -854,7 +866,7 @@ define void @ld_div2_step3_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -917,7 +929,7 @@ define void @ld_div3_step3_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat 
(i64 2) ; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -973,7 +985,7 @@ define void @ld_div3_step3_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1030,7 +1042,7 @@ define void @ld_div1_step1_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1072,7 +1084,7 @@ define void @ld_div1_step1_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 996 -; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; VF4-NEXT: br i1 
[[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1129,7 +1141,7 @@ define void @ld_div2_step1_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1171,7 +1183,7 @@ define void @ld_div2_step1_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 996 -; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1228,7 +1240,7 @@ define void @ld_div3_step1_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1270,7 +1282,7 @@ 
define void @ld_div3_step1_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 996 -; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1334,7 +1346,7 @@ define void @ld_div1_step2_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1391,7 +1403,7 @@ define void @ld_div1_step2_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 496 -; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1455,7 +1467,7 @@ define void @ld_div2_step2_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> 
[[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1512,7 +1524,7 @@ define void @ld_div2_step2_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 496 -; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1576,7 +1588,7 @@ define void @ld_div3_step2_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1633,7 +1645,7 @@ define void @ld_div3_step2_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 496 -; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP30:![0-9]+]] +; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1697,7 +1709,7 @@ define void @ld_div1_step3_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1754,7 +1766,7 @@ define void @ld_div1_step3_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1818,7 +1830,7 @@ define void @ld_div2_step3_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] +; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; 
VF2: scalar.ph: @@ -1875,7 +1887,7 @@ define void @ld_div2_step3_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] +; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1939,7 +1951,7 @@ define void @ld_div3_step3_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] +; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1996,7 +2008,7 @@ define void @ld_div3_step3_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] +; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll b/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll index 
5f83e392..5d07341 100644 --- a/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll +++ b/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll @@ -23,26 +23,7 @@ define void @test_not_first_lane_only_constant(ptr %A, ptr noalias %B) { ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i16 [[IV]] -; CHECK-NEXT: br i1 false, label [[LOOP_LATCH]], label [[ELSE_1:%.*]] -; CHECK: else.1: -; CHECK-NEXT: br i1 false, label [[THEN_2:%.*]], label [[ELSE_2:%.*]] -; CHECK: then.2: -; CHECK-NEXT: br label [[ELSE_2]] -; CHECK: else.2: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[MERGE:%.*]] = phi ptr [ [[B]], [[ELSE_2]] ], [ poison, [[LOOP_HEADER]] ] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[MERGE]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; CHECK-NEXT: store i16 [[L]], ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[C_2:%.*]] = icmp eq i16 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[C_2]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -98,29 +79,7 @@ define void @test_not_first_lane_only_wide_compare(ptr %A, ptr noalias %B, i16 % ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, 
[[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i16 [[IV]] -; CHECK-NEXT: [[L_0:%.*]] = load i16, ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[C_0:%.*]] = icmp ult i16 [[L_0]], [[X]] -; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[ELSE_1:%.*]] -; CHECK: else.1: -; CHECK-NEXT: [[C_1:%.*]] = icmp ult i16 [[L_0]], [[Y]] -; CHECK-NEXT: br i1 [[C_1]], label [[THEN_2:%.*]], label [[ELSE_2:%.*]] -; CHECK: then.2: -; CHECK-NEXT: br label [[ELSE_2]] -; CHECK: else.2: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[MERGE:%.*]] = phi ptr [ [[B]], [[ELSE_2]] ], [ poison, [[LOOP_HEADER]] ] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[MERGE]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; CHECK-NEXT: store i16 [[L]], ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[C_2:%.*]] = icmp eq i16 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[C_2]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -179,29 +138,7 @@ define void @test_not_first_lane_only_wide_compare_incoming_order_swapped(ptr %A ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i16 [[IV]] -; CHECK-NEXT: [[L_0:%.*]] = load i16, ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[C_0:%.*]] = icmp ult i16 [[L_0]], [[X]] -; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[ELSE_1:%.*]] -; CHECK: else.1: -; CHECK-NEXT: [[C_1:%.*]] = icmp ult i16 [[L_0]], [[Y]] -; CHECK-NEXT: br i1 [[C_1]], label 
[[THEN_2:%.*]], label [[ELSE_2:%.*]] -; CHECK: then.2: -; CHECK-NEXT: br label [[ELSE_2]] -; CHECK: else.2: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[MERGE:%.*]] = phi ptr [ poison, [[LOOP_HEADER]] ], [ [[B]], [[ELSE_2]] ] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[MERGE]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; CHECK-NEXT: store i16 [[L]], ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[C_2:%.*]] = icmp eq i16 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[C_2]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll index 462865d..8da1dca 100644 --- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll +++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll @@ -31,20 +31,8 @@ define i8 @test_early_exit_max_tc_less_than_16(ptr dereferenceable(16) %A) nosyn ; VF8UF1-NEXT: br label %[[EXIT:.*]] ; VF8UF1: [[VECTOR_EARLY_EXIT]]: ; VF8UF1-NEXT: br label %[[EXIT]] -; VF8UF1: [[SCALAR_PH:.*]]: -; VF8UF1-NEXT: br label %[[LOOP_HEADER:.*]] -; VF8UF1: [[LOOP_HEADER]]: -; VF8UF1-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF8UF1-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV1]] -; VF8UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC1]], align 1 -; VF8UF1-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 -; VF8UF1-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] -; VF8UF1: [[LOOP_LATCH]]: -; VF8UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1 -; VF8UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; VF8UF1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; VF8UF1: [[EXIT]]: -; VF8UF1-NEXT: [[RES:%.*]] = phi i8 [ 0, %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 
1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] +; VF8UF1-NEXT: [[RES:%.*]] = phi i8 [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] ; VF8UF1-NEXT: ret i8 [[RES]] ; ; VF8UF2-LABEL: define i8 @test_early_exit_max_tc_less_than_16( @@ -70,20 +58,8 @@ define i8 @test_early_exit_max_tc_less_than_16(ptr dereferenceable(16) %A) nosyn ; VF8UF2-NEXT: br label %[[EXIT:.*]] ; VF8UF2: [[VECTOR_EARLY_EXIT]]: ; VF8UF2-NEXT: br label %[[EXIT]] -; VF8UF2: [[SCALAR_PH:.*]]: -; VF8UF2-NEXT: br label %[[LOOP_HEADER:.*]] -; VF8UF2: [[LOOP_HEADER]]: -; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF8UF2-NEXT: [[P_SRC:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; VF8UF2-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC]], align 1 -; VF8UF2-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 -; VF8UF2-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] -; VF8UF2: [[LOOP_LATCH]]: -; VF8UF2-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; VF8UF2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; VF8UF2-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; VF8UF2: [[EXIT]]: -; VF8UF2-NEXT: [[RES:%.*]] = phi i8 [ 0, %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] +; VF8UF2-NEXT: [[RES:%.*]] = phi i8 [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] ; VF8UF2-NEXT: ret i8 [[RES]] ; ; VF16UF1-LABEL: define i8 @test_early_exit_max_tc_less_than_16( @@ -104,20 +80,8 @@ define i8 @test_early_exit_max_tc_less_than_16(ptr dereferenceable(16) %A) nosyn ; VF16UF1-NEXT: br label %[[EXIT:.*]] ; VF16UF1: [[VECTOR_EARLY_EXIT]]: ; VF16UF1-NEXT: br label %[[EXIT]] -; VF16UF1: [[SCALAR_PH:.*]]: -; VF16UF1-NEXT: br label %[[LOOP_HEADER:.*]] -; VF16UF1: [[LOOP_HEADER]]: -; VF16UF1-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF16UF1-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV1]] -; VF16UF1-NEXT: [[L:%.*]] = load 
i8, ptr [[P_SRC1]], align 1 -; VF16UF1-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 -; VF16UF1-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] -; VF16UF1: [[LOOP_LATCH]]: -; VF16UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1 -; VF16UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; VF16UF1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; VF16UF1: [[EXIT]]: -; VF16UF1-NEXT: [[RES:%.*]] = phi i8 [ 0, %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] +; VF16UF1-NEXT: [[RES:%.*]] = phi i8 [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] ; VF16UF1-NEXT: ret i8 [[RES]] ; entry: @@ -166,20 +130,8 @@ define i64 @test_early_exit_max_tc_less_than_16_with_iv_used_outside(ptr derefer ; VF8UF1-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v8i1(<8 x i1> [[TMP3]], i1 true) ; VF8UF1-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], [[FIRST_ACTIVE_LANE]] ; VF8UF1-NEXT: br label %[[EXIT]] -; VF8UF1: [[SCALAR_PH:.*]]: -; VF8UF1-NEXT: br label %[[LOOP_HEADER:.*]] -; VF8UF1: [[LOOP_HEADER]]: -; VF8UF1-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF8UF1-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV1]] -; VF8UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC1]], align 1 -; VF8UF1-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 -; VF8UF1-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] -; VF8UF1: [[LOOP_LATCH]]: -; VF8UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1 -; VF8UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; VF8UF1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; VF8UF1: [[EXIT]]: -; VF8UF1-NEXT: [[RES:%.*]] = phi i64 [ [[IV1]], %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP8]], %[[VECTOR_EARLY_EXIT]] ] +; VF8UF1-NEXT: [[RES:%.*]] = phi i64 [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP8]], %[[VECTOR_EARLY_EXIT]] ] ; VF8UF1-NEXT: ret i64 [[RES]] ; ; VF8UF2-LABEL: define i64 
@test_early_exit_max_tc_less_than_16_with_iv_used_outside( @@ -212,20 +164,8 @@ define i64 @test_early_exit_max_tc_less_than_16_with_iv_used_outside(ptr derefer ; VF8UF2-NEXT: [[TMP11:%.*]] = select i1 [[TMP10]], i64 [[TMP9]], i64 [[TMP7]] ; VF8UF2-NEXT: [[TMP12:%.*]] = add i64 0, [[TMP11]] ; VF8UF2-NEXT: br label %[[EXIT]] -; VF8UF2: [[SCALAR_PH:.*]]: -; VF8UF2-NEXT: br label %[[LOOP_HEADER:.*]] -; VF8UF2: [[LOOP_HEADER]]: -; VF8UF2-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF8UF2-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV1]] -; VF8UF2-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC1]], align 1 -; VF8UF2-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 -; VF8UF2-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] -; VF8UF2: [[LOOP_LATCH]]: -; VF8UF2-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1 -; VF8UF2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; VF8UF2-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; VF8UF2: [[EXIT]]: -; VF8UF2-NEXT: [[RES:%.*]] = phi i64 [ [[IV1]], %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP12]], %[[VECTOR_EARLY_EXIT]] ] +; VF8UF2-NEXT: [[RES:%.*]] = phi i64 [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP12]], %[[VECTOR_EARLY_EXIT]] ] ; VF8UF2-NEXT: ret i64 [[RES]] ; ; VF16UF1-LABEL: define i64 @test_early_exit_max_tc_less_than_16_with_iv_used_outside( @@ -248,20 +188,8 @@ define i64 @test_early_exit_max_tc_less_than_16_with_iv_used_outside(ptr derefer ; VF16UF1-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v16i1(<16 x i1> [[TMP3]], i1 true) ; VF16UF1-NEXT: [[TMP5:%.*]] = add i64 0, [[FIRST_ACTIVE_LANE]] ; VF16UF1-NEXT: br label %[[EXIT]] -; VF16UF1: [[SCALAR_PH:.*]]: -; VF16UF1-NEXT: br label %[[LOOP_HEADER:.*]] -; VF16UF1: [[LOOP_HEADER]]: -; VF16UF1-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF16UF1-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], 
i64 [[IV1]] -; VF16UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC1]], align 1 -; VF16UF1-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 -; VF16UF1-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] -; VF16UF1: [[LOOP_LATCH]]: -; VF16UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1 -; VF16UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; VF16UF1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; VF16UF1: [[EXIT]]: -; VF16UF1-NEXT: [[RES:%.*]] = phi i64 [ [[IV1]], %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP5]], %[[VECTOR_EARLY_EXIT]] ] +; VF16UF1-NEXT: [[RES:%.*]] = phi i64 [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP5]], %[[VECTOR_EARLY_EXIT]] ] ; VF16UF1-NEXT: ret i64 [[RES]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-outside-iv-users.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-outside-iv-users.ll index d013584..2317af5 100644 --- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-outside-iv-users.ll +++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-outside-iv-users.ll @@ -17,18 +17,8 @@ define i64 @remove_loop_region_int_iv_used_outside(ptr %dst) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr ptr, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store ptr null, ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ], [ 15, %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 15 ; entry: br label %loop @@ -60,18 +50,8 @@ define 
i64 @remove_loop_region_int_iv_inc_used_outside(ptr %dst) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr ptr, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store ptr null, ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[IV_NEXT]], %[[LOOP]] ], [ 16, %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 16 ; entry: br label %loop @@ -105,19 +85,8 @@ define ptr @remove_loop_region_ptr_iv_used_outside(ptr %dst) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[IND_ESCAPE:%.*]] = getelementptr i8, ptr [[TMP0]], i64 -8 ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[DST]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[INT_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INT_IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: store ptr null, ptr [[PTR_IV]], align 8 -; CHECK-NEXT: [[INT_IV_NEXT]] = add i64 [[INT_IV]], 1 -; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 8 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[INT_IV_NEXT]], 16 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi ptr [ [[PTR_IV]], %[[LOOP]] ], [ [[IND_ESCAPE]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret ptr [[RES]] +; CHECK-NEXT: ret ptr [[IND_ESCAPE]] ; entry: br label %loop @@ -151,19 +120,8 @@ define ptr @remove_loop_region_ptr_iv_inc_used_outside(ptr %dst) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: 
[[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[DST]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[INT_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INT_IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: store ptr null, ptr [[PTR_IV]], align 8 -; CHECK-NEXT: [[INT_IV_NEXT]] = add i64 [[INT_IV]], 1 -; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 8 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[INT_IV_NEXT]], 16 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi ptr [ [[PTR_IV_NEXT]], %[[LOOP]] ], [ [[TMP0]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret ptr [[RES]] +; CHECK-NEXT: ret ptr [[TMP0]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll index 5f86469..e160a15 100644 --- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll +++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll @@ -176,15 +176,6 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5, ; VF8UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF8UF1: [[MIDDLE_BLOCK]]: ; VF8UF1-NEXT: br label %[[EXIT:.*]] -; VF8UF1: [[SCALAR_PH:.*]]: -; VF8UF1-NEXT: br label %[[LOOP:.*]] -; VF8UF1: [[LOOP]]: -; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]] -; VF8UF1-NEXT: store i16 0, ptr [[GEP_DST]], align 2 -; VF8UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF8UF1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF8UF1-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; VF8UF1: [[EXIT]]: ; VF8UF1-NEXT: ret void ; @@ -316,15 +307,6 @@ define void 
@remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5, ; VF8UF2-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF8UF2: [[MIDDLE_BLOCK]]: ; VF8UF2-NEXT: br label %[[EXIT:.*]] -; VF8UF2: [[SCALAR_PH:.*]]: -; VF8UF2-NEXT: br label %[[LOOP:.*]] -; VF8UF2: [[LOOP]]: -; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]] -; VF8UF2-NEXT: store i16 0, ptr [[GEP_DST]], align 2 -; VF8UF2-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF8UF2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF8UF2-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; VF8UF2: [[EXIT]]: ; VF8UF2-NEXT: ret void ; @@ -455,15 +437,6 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5, ; VF16UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF16UF1: [[MIDDLE_BLOCK]]: ; VF16UF1-NEXT: br label %[[EXIT:.*]] -; VF16UF1: [[SCALAR_PH:.*]]: -; VF16UF1-NEXT: br label %[[LOOP:.*]] -; VF16UF1: [[LOOP]]: -; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]] -; VF16UF1-NEXT: store i16 0, ptr [[GEP_DST]], align 2 -; VF16UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF16UF1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF16UF1-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; VF16UF1: [[EXIT]]: ; VF16UF1-NEXT: ret void ; @@ -728,23 +701,14 @@ define void @scev_expand_step(i64 %x, ptr %dst) { ; VF8UF1: [[PRED_STORE_IF13]]: ; VF8UF1-NEXT: [[TMP40:%.*]] = mul i64 7, [[STEP]] ; VF8UF1-NEXT: [[TMP41:%.*]] = add i64 0, [[TMP40]] -; VF8UF1-NEXT: [[TMP42:%.*]] = add i64 [[TMP41]], [[STEP]] -; VF8UF1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP42]] -; VF8UF1-NEXT: store i8 0, ptr [[TMP43]], align 1 +; VF8UF1-NEXT: [[IV_NEXT:%.*]] = add i64 [[TMP41]], [[STEP]] +; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]] +; 
VF8UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1 ; VF8UF1-NEXT: br label %[[PRED_STORE_CONTINUE14]] ; VF8UF1: [[PRED_STORE_CONTINUE14]]: ; VF8UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF8UF1: [[MIDDLE_BLOCK]]: ; VF8UF1-NEXT: br label %[[EXIT:.*]] -; VF8UF1: [[SCALAR_PH:.*]]: -; VF8UF1-NEXT: br label %[[LOOP:.*]] -; VF8UF1: [[LOOP]]: -; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF8UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]] -; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]] -; VF8UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1 -; VF8UF1-NEXT: [[EC:%.*]] = icmp slt i64 [[IV_NEXT]], 16 -; VF8UF1-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]] ; VF8UF1: [[EXIT]]: ; VF8UF1-NEXT: ret void ; @@ -922,22 +886,13 @@ define void @scev_expand_step(i64 %x, ptr %dst) { ; VF8UF2-NEXT: [[TMP81:%.*]] = mul i64 15, [[STEP]] ; VF8UF2-NEXT: [[TMP82:%.*]] = add i64 0, [[TMP81]] ; VF8UF2-NEXT: [[TMP83:%.*]] = add i64 [[TMP82]], [[STEP]] -; VF8UF2-NEXT: [[TMP84:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP83]] -; VF8UF2-NEXT: store i8 0, ptr [[TMP84]], align 1 +; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP83]] +; VF8UF2-NEXT: store i8 0, ptr [[GEP_DST]], align 1 ; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE30]] ; VF8UF2: [[PRED_STORE_CONTINUE30]]: ; VF8UF2-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF8UF2: [[MIDDLE_BLOCK]]: ; VF8UF2-NEXT: br label %[[EXIT:.*]] -; VF8UF2: [[SCALAR_PH:.*]]: -; VF8UF2-NEXT: br label %[[LOOP:.*]] -; VF8UF2: [[LOOP]]: -; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF8UF2-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]] -; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]] -; VF8UF2-NEXT: store i8 0, ptr [[GEP_DST]], align 1 -; VF8UF2-NEXT: [[EC:%.*]] = icmp slt i64 [[IV_NEXT]], 16 -; VF8UF2-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]] ; VF8UF2: [[EXIT]]: ; VF8UF2-NEXT: 
ret void ; @@ -1114,22 +1069,13 @@ define void @scev_expand_step(i64 %x, ptr %dst) { ; VF16UF1-NEXT: [[TMP80:%.*]] = mul i64 15, [[STEP]] ; VF16UF1-NEXT: [[TMP81:%.*]] = add i64 0, [[TMP80]] ; VF16UF1-NEXT: [[TMP82:%.*]] = add i64 [[TMP81]], [[STEP]] -; VF16UF1-NEXT: [[TMP83:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP82]] -; VF16UF1-NEXT: store i8 0, ptr [[TMP83]], align 1 +; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP82]] +; VF16UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1 ; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE30]] ; VF16UF1: [[PRED_STORE_CONTINUE30]]: ; VF16UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF16UF1: [[MIDDLE_BLOCK]]: ; VF16UF1-NEXT: br label %[[EXIT:.*]] -; VF16UF1: [[SCALAR_PH:.*]]: -; VF16UF1-NEXT: br label %[[LOOP:.*]] -; VF16UF1: [[LOOP]]: -; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF16UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]] -; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]] -; VF16UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1 -; VF16UF1-NEXT: [[EC:%.*]] = icmp slt i64 [[IV_NEXT]], 16 -; VF16UF1-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]] ; VF16UF1: [[EXIT]]: ; VF16UF1-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll index 5a0c69b..06b0448 100644 --- a/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll @@ -753,3 +753,50 @@ exit: %r.0.lcssa = phi i64 [ %rdx.next, %loop ] ret i64 %r.0.lcssa } + +define i64 @print_mulacc_duplicate_extends(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) { +; CHECK-LABEL: 'print_mulacc_duplicate_extends' +; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { +; CHECK-NEXT: Live-in vp<[[VF:%.+]]> = VF +; CHECK-NEXT: Live-in vp<[[VFxUF:%.+]]> = VF * UF +; CHECK-NEXT: Live-in 
vp<[[VTC:%.+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<%n> = original trip-count +; CHECK-EMPTY: +; CHECK: vector.ph: +; CHECK-NEXT: EMIT vp<[[RDX_START:%.+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[IV:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[IV_NEXT:%.+]]> +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<[[RDX:%.+]]> = phi vp<[[RDX_START]]>, vp<[[RDX_NEXT:%.+]]> +; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[IV]]>, ir<1> +; CHECK-NEXT: CLONE ir<[[ARRAYIDX0:%.+]]> = getelementptr inbounds ir<%x>, vp<[[STEPS]]> +; CHECK-NEXT: vp<[[ADDR0:%.+]]> = vector-pointer ir<[[ARRAYIDX0]]> +; CHECK-NEXT: WIDEN ir<[[LOAD0:%.+]]> = load vp<[[ADDR0]]> +; CHECK-NEXT: EXPRESSION vp<[[RDX_NEXT:%.+]]> = ir<[[RDX]]> + reduce.sub (mul nsw (ir<[[LOAD0]]> sext to i64), (ir<[[LOAD0]]> sext to i64)) +; CHECK-NEXT: EMIT vp<[[IV_NEXT]]> = add nuw vp<[[IV]]>, vp<[[VFxUF]]> +; CHECK-NEXT: EMIT branch-on-count vp<[[IV_NEXT]]>, vp<[[VTC]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %loop + +loop: + %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] + %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] + %arrayidx = getelementptr inbounds i16, ptr %x, i32 %iv + %load0 = load i16, ptr %arrayidx, align 4 + %conv0 = sext i16 %load0 to i32 + %mul = mul nsw i32 %conv0, %conv0 + %conv = sext i32 %mul to i64 + %rdx.next = sub nsw i64 %rdx, %conv + %iv.next = add nuw nsw i32 %iv, 1 + %exitcond = icmp eq i32 %iv.next, %n + br i1 %exitcond, label %exit, label %loop + +exit: + %r.0.lcssa = phi i64 [ %rdx.next, %loop ] + ret i64 %r.0.lcssa +} diff --git a/llvm/test/Transforms/LoopVectorize/widen-gep-all-indices-invariant.ll b/llvm/test/Transforms/LoopVectorize/widen-gep-all-indices-invariant.ll index 06b7bd8..d08ca8c 100644 --- a/llvm/test/Transforms/LoopVectorize/widen-gep-all-indices-invariant.ll +++ 
b/llvm/test/Transforms/LoopVectorize/widen-gep-all-indices-invariant.ll @@ -21,19 +21,6 @@ define void @pr63340(ptr %A, ptr %B) { ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[F_0_I:%.*]] = phi ptr [ [[A]], [[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[F_0_I]], i64 1 -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds ptr, ptr [[B]], i8 [[IV]] -; CHECK-NEXT: store ptr [[GEP]], ptr [[GEP_B]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i8 [[IV_NEXT]], -128 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -78,17 +65,6 @@ define void @wide_gep_index_invariant(ptr noalias %dst, ptr noalias %src, i64 %n ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[L:%.*]] = load ptr, ptr [[SRC]], align 8 -; CHECK-NEXT: [[GEP_L:%.*]] = getelementptr float, ptr [[L]], i64 [[N]] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr ptr, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store ptr [[GEP_L]], ptr [[GEP_DST]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -131,17 +107,6 @@ define void 
@wide_gep_multiple_indices_some_invariant(ptr noalias %dst, ptr noal ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[L:%.*]] = load ptr, ptr [[SRC]], align 8 -; CHECK-NEXT: [[GEP_L:%.*]] = getelementptr [10 x float], ptr [[L]], i32 [[X]], i64 [[IV]] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr ptr, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store ptr [[GEP_L]], ptr [[GEP_DST]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/widen-intrinsic.ll b/llvm/test/Transforms/LoopVectorize/widen-intrinsic.ll index 055f2fd..922ebe7 100644 --- a/llvm/test/Transforms/LoopVectorize/widen-intrinsic.ll +++ b/llvm/test/Transforms/LoopVectorize/widen-intrinsic.ll @@ -20,17 +20,6 @@ define void @powi_only_first_lane_used_of_second_arg(ptr %p, i32 %pow) { ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_GEP:%.*]] = getelementptr float, ptr [[P]], i32 [[IV]] -; CHECK-NEXT: [[X:%.*]] = load float, ptr [[P_GEP]], align 4 -; CHECK-NEXT: [[Y:%.*]] = call float @llvm.powi.f32.i32(float [[X]], i32 [[POW]]) -; CHECK-NEXT: store float [[Y]], ptr [[P_GEP]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i32 [[IV_NEXT]], 1024 
-; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll index 435e6fc..5e9fe8c 100644 --- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll +++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll @@ -34,8 +34,8 @@ define void @arm_mean_q7(ptr noundef %pSrc, i32 noundef %blockSize, ptr noundef ; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP2]], [[WHILE_END_LOOPEXIT]] ] ; CHECK-NEXT: [[AND:%.*]] = and i32 [[BLOCKSIZE]], 15 ; CHECK-NEXT: [[CMP2_NOT15:%.*]] = icmp eq i32 [[AND]], 0 -; CHECK-NEXT: br i1 [[CMP2_NOT15]], label [[WHILE_END5:%.*]], label [[MIDDLE_BLOCK:%.*]] -; CHECK: middle.block: +; CHECK-NEXT: br i1 [[CMP2_NOT15]], label [[WHILE_END5:%.*]], label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = tail call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 0, i32 [[AND]]) ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[PSRC_ADDR_0_LCSSA]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison) ; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i32> @@ -44,7 +44,7 @@ define void @arm_mean_q7(ptr noundef %pSrc, i32 noundef %blockSize, ptr noundef ; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[SUM_0_LCSSA]], [[TMP6]] ; CHECK-NEXT: br label [[WHILE_END5]] ; CHECK: while.end5: -; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA]], [[WHILE_END]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA]], [[WHILE_END]] ], [ [[TMP7]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[SUM_1_LCSSA]], [[BLOCKSIZE]] ; CHECK-NEXT: [[CONV6:%.*]] = trunc i32 [[DIV]] to i8 ; CHECK-NEXT: store i8 [[CONV6]], ptr [[PRESULT:%.*]], align 1 diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll 
b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll index 4f52227..02e05b2 100644 --- a/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll +++ b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll @@ -527,23 +527,14 @@ define void @rt_stride_1_with_reordering(ptr %pl, i64 %stride, ptr %ps) { ret void } -; TODO: We want to generate this code: -; define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { -; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0 -; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 -; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 1 %gep_l0, i64 8, <4 x i1> splat (i1 true), i32 4) -; %bitcast_ = bitcast <4 x i32> %strided_load to <16 x i8> -; store <16 x i8> %bitcast_, ptr %gep_s0, align 1 -; ret void -; } -define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { -; CHECK-LABEL: define void @constant_stride_widen_no_reordering( +define void @constant_stride_masked_no_reordering(ptr %pl, i64 %stride, ptr %ps) { +; CHECK-LABEL: define void @constant_stride_masked_no_reordering( ; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 ; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 ; CHECK-NEXT: [[TMP1:%.*]] = call <28 x i8> @llvm.masked.load.v28i8.p0(ptr [[GEP_L0]], i32 1, <28 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <28 x i8> poison) -; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <28 x i8> [[TMP1]], <28 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27> -; 
CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 1 +; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <28 x i8> [[TMP1]], <28 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27> +; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 1 ; CHECK-NEXT: ret void ; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 @@ -618,6 +609,107 @@ define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) } ; TODO: We want to generate this code: +; define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) #0 { +; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 +; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 +; %1 = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 1 %gep_l0, i64 100, <4 x i1> splat (i1 true), i32 4) +; %2 = bitcast <4 x i32> %1 to <16 x i8> +; store <16 x i8> %2, ptr %gep_s0, align 1 +; ret void +; } +define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { +; CHECK-LABEL: define void @constant_stride_widen_no_reordering( +; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 +; CHECK-NEXT: [[GEP_L4:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 100 +; CHECK-NEXT: [[GEP_L8:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 200 +; CHECK-NEXT: [[GEP_L12:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 300 +; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[GEP_L0]], align 1 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_L4]], align 1 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i8>, ptr [[GEP_L8]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i8>, ptr [[GEP_L12]], align 1 +; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> <i32 0, i32 
1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> [[TMP2]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP11]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x i8> [[TMP4]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19> +; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 1 +; CHECK-NEXT: ret void +; + %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 + %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1 + %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2 + %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3 + %gep_l4 = getelementptr inbounds i8, ptr 
%pl, i64 100 + %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 101 + %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 102 + %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 103 + %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 200 + %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 201 + %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 202 + %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 203 + %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 300 + %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 301 + %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 302 + %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 303 + + %load0 = load i8, ptr %gep_l0 , align 1 + %load1 = load i8, ptr %gep_l1 , align 1 + %load2 = load i8, ptr %gep_l2 , align 1 + %load3 = load i8, ptr %gep_l3 , align 1 + %load4 = load i8, ptr %gep_l4 , align 1 + %load5 = load i8, ptr %gep_l5 , align 1 + %load6 = load i8, ptr %gep_l6 , align 1 + %load7 = load i8, ptr %gep_l7 , align 1 + %load8 = load i8, ptr %gep_l8 , align 1 + %load9 = load i8, ptr %gep_l9 , align 1 + %load10 = load i8, ptr %gep_l10, align 1 + %load11 = load i8, ptr %gep_l11, align 1 + %load12 = load i8, ptr %gep_l12, align 1 + %load13 = load i8, ptr %gep_l13, align 1 + %load14 = load i8, ptr %gep_l14, align 1 + %load15 = load i8, ptr %gep_l15, align 1 + + %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 + %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 + %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2 + %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3 + %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4 + %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5 + %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6 + %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7 + %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8 + %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9 + %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10 + %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11 + %gep_s12 = getelementptr 
inbounds i8, ptr %ps, i64 12 + %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13 + %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 + %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 + + store i8 %load0, ptr %gep_s0, align 1 + store i8 %load1, ptr %gep_s1, align 1 + store i8 %load2, ptr %gep_s2, align 1 + store i8 %load3, ptr %gep_s3, align 1 + store i8 %load4, ptr %gep_s4, align 1 + store i8 %load5, ptr %gep_s5, align 1 + store i8 %load6, ptr %gep_s6, align 1 + store i8 %load7, ptr %gep_s7, align 1 + store i8 %load8, ptr %gep_s8, align 1 + store i8 %load9, ptr %gep_s9, align 1 + store i8 %load10, ptr %gep_s10, align 1 + store i8 %load11, ptr %gep_s11, align 1 + store i8 %load12, ptr %gep_s12, align 1 + store i8 %load13, ptr %gep_s13, align 1 + store i8 %load14, ptr %gep_s14, align 1 + store i8 %load15, ptr %gep_s15, align 1 + + ret void +} +; TODO: We want to generate this code: ; define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { ; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0 ; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 diff --git a/llvm/test/Unit/CMakeLists.txt b/llvm/test/Unit/CMakeLists.txt new file mode 100644 index 0000000..6b0abe1 --- /dev/null +++ b/llvm/test/Unit/CMakeLists.txt @@ -0,0 +1,5 @@ +add_lit_testsuite(check-llvm-unit "Running lit suite for LLVM unit tests" + ${CMAKE_CURRENT_BINARY_DIR} + EXCLUDE_FROM_CHECK_ALL + DEPENDS UnitTests + ) diff --git a/llvm/test/tools/llvm-ir2vec/entities.ll b/llvm/test/tools/llvm-ir2vec/entities.ll index 4b51adf..8dbce57 100644 --- a/llvm/test/tools/llvm-ir2vec/entities.ll +++ b/llvm/test/tools/llvm-ir2vec/entities.ll @@ -1,6 +1,6 @@ ; RUN: llvm-ir2vec entities | FileCheck %s -CHECK: 84 +CHECK: 110 CHECK-NEXT: Ret 0 CHECK-NEXT: Br 1 CHECK-NEXT: Switch 2 @@ -85,3 +85,29 @@ CHECK-NEXT: Function 80 CHECK-NEXT: Pointer 81 CHECK-NEXT: Constant 82 CHECK-NEXT: Variable 83 +CHECK-NEXT: FCMP_false 84 +CHECK-NEXT: FCMP_oeq 85 +CHECK-NEXT: FCMP_ogt 86 +CHECK-NEXT: 
FCMP_oge 87 +CHECK-NEXT: FCMP_olt 88 +CHECK-NEXT: FCMP_ole 89 +CHECK-NEXT: FCMP_one 90 +CHECK-NEXT: FCMP_ord 91 +CHECK-NEXT: FCMP_uno 92 +CHECK-NEXT: FCMP_ueq 93 +CHECK-NEXT: FCMP_ugt 94 +CHECK-NEXT: FCMP_uge 95 +CHECK-NEXT: FCMP_ult 96 +CHECK-NEXT: FCMP_ule 97 +CHECK-NEXT: FCMP_une 98 +CHECK-NEXT: FCMP_true 99 +CHECK-NEXT: ICMP_eq 100 +CHECK-NEXT: ICMP_ne 101 +CHECK-NEXT: ICMP_ugt 102 +CHECK-NEXT: ICMP_uge 103 +CHECK-NEXT: ICMP_ult 104 +CHECK-NEXT: ICMP_ule 105 +CHECK-NEXT: ICMP_sgt 106 +CHECK-NEXT: ICMP_sge 107 +CHECK-NEXT: ICMP_slt 108 +CHECK-NEXT: ICMP_sle 109 diff --git a/llvm/tools/llvm-cgdata/llvm-cgdata.cpp b/llvm/tools/llvm-cgdata/llvm-cgdata.cpp index 047557e..ea89c4d 100644 --- a/llvm/tools/llvm-cgdata/llvm-cgdata.cpp +++ b/llvm/tools/llvm-cgdata/llvm-cgdata.cpp @@ -83,7 +83,9 @@ static CGDataAction Action; static std::optional<CGDataFormat> OutputFormat; static std::vector<std::string> InputFilenames; +namespace llvm { extern cl::opt<bool> IndexedCodeGenDataLazyLoading; +} // end namespace llvm static void exitWithError(Twine Message, StringRef Whence = "", StringRef Hint = "") { diff --git a/llvm/tools/llvm-config/llvm-config.cpp b/llvm/tools/llvm-config/llvm-config.cpp index 49df8fd..7f8c55a 100644 --- a/llvm/tools/llvm-config/llvm-config.cpp +++ b/llvm/tools/llvm-config/llvm-config.cpp @@ -357,18 +357,18 @@ int main(int argc, char **argv) { ActivePrefix = CurrentExecPrefix; { SmallString<256> Path(LLVM_INSTALL_INCLUDEDIR); - sys::fs::make_absolute(ActivePrefix, Path); + sys::path::make_absolute(ActivePrefix, Path); ActiveIncludeDir = std::string(Path); } { SmallString<256> Path(LLVM_TOOLS_INSTALL_DIR); - sys::fs::make_absolute(ActivePrefix, Path); + sys::path::make_absolute(ActivePrefix, Path); ActiveBinDir = std::string(Path); } ActiveLibDir = ActivePrefix + "/lib" + LLVM_LIBDIR_SUFFIX; { SmallString<256> Path(LLVM_INSTALL_PACKAGE_DIR); - sys::fs::make_absolute(ActivePrefix, Path); + sys::path::make_absolute(ActivePrefix, Path); ActiveCMakeDir = 
std::string(Path); } ActiveIncludeOption = "-I" + ActiveIncludeDir; diff --git a/llvm/tools/llvm-dwp/llvm-dwp.cpp b/llvm/tools/llvm-dwp/llvm-dwp.cpp index 61ba82d..31bad2d 100644 --- a/llvm/tools/llvm-dwp/llvm-dwp.cpp +++ b/llvm/tools/llvm-dwp/llvm-dwp.cpp @@ -94,7 +94,7 @@ getDWOFilenames(StringRef ExecFilename) { dwarf::toString(Die.find(dwarf::DW_AT_comp_dir), ""); if (!DWOCompDir.empty()) { SmallString<16> DWOPath(DWOName); - sys::fs::make_absolute(DWOCompDir, DWOPath); + sys::path::make_absolute(DWOCompDir, DWOPath); if (!sys::fs::exists(DWOPath) && sys::fs::exists(DWOName)) DWOPaths.push_back(std::move(DWOName)); else diff --git a/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp b/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp index aabebf0..434449c 100644 --- a/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp +++ b/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp @@ -162,8 +162,8 @@ public: for (const BasicBlock &BB : F) { for (const auto &I : BB.instructionsWithoutDebug()) { - unsigned Opcode = Vocabulary::getSlotIndex(I.getOpcode()); - unsigned TypeID = Vocabulary::getSlotIndex(I.getType()->getTypeID()); + unsigned Opcode = Vocabulary::getIndex(I.getOpcode()); + unsigned TypeID = Vocabulary::getIndex(I.getType()->getTypeID()); // Add "Next" relationship with previous instruction if (HasPrevOpcode) { @@ -184,7 +184,7 @@ public: // Add "Arg" relationships unsigned ArgIndex = 0; for (const Use &U : I.operands()) { - unsigned OperandID = Vocabulary::getSlotIndex(*U); + unsigned OperandID = Vocabulary::getIndex(*U.get()); unsigned RelationID = ArgRelation + ArgIndex; OS << Opcode << '\t' << OperandID << '\t' << RelationID << '\n'; diff --git a/llvm/tools/llvm-opt-report/OptReport.cpp b/llvm/tools/llvm-opt-report/OptReport.cpp index 68ed92c..e4b4fc2 100644 --- a/llvm/tools/llvm-opt-report/OptReport.cpp +++ b/llvm/tools/llvm-opt-report/OptReport.cpp @@ -274,7 +274,7 @@ static bool writeReport(LocationInfoTy &LocationInfo) { for (auto &FI : LocationInfo) { SmallString<128> FileName(FI.first); if 
(!InputRelDir.empty()) - sys::fs::make_absolute(InputRelDir, FileName); + sys::path::make_absolute(InputRelDir, FileName); const auto &FileInfo = FI.second; diff --git a/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp b/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp index dc6059d..b6e8567 100644 --- a/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp +++ b/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp @@ -43,8 +43,11 @@ class FunctionPropertiesAnalysisTest : public testing::Test { public: FunctionPropertiesAnalysisTest() { auto VocabVector = ir2vec::Vocabulary::createDummyVocabForTest(1); - MAM.registerPass([&] { return IR2VecVocabAnalysis(VocabVector); }); - IR2VecVocab = ir2vec::Vocabulary(std::move(VocabVector)); + MAM.registerPass([VocabVector = std::move(VocabVector)]() mutable { + return IR2VecVocabAnalysis(std::move(VocabVector)); + }); + IR2VecVocab = + new ir2vec::Vocabulary(ir2vec::Vocabulary::createDummyVocabForTest(1)); MAM.registerPass([&] { return PassInstrumentationAnalysis(); }); FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); }); FAM.registerPass([&] { return DominatorTreeAnalysis(); }); @@ -66,7 +69,7 @@ protected: std::unique_ptr<LoopInfo> LI; FunctionAnalysisManager FAM; ModuleAnalysisManager MAM; - ir2vec::Vocabulary IR2VecVocab; + ir2vec::Vocabulary *IR2VecVocab; void TearDown() override { // Restore original IR2Vec weights @@ -78,7 +81,7 @@ protected: FunctionPropertiesInfo buildFPI(Function &F) { // FunctionPropertiesInfo assumes IR2VecVocabAnalysis has been run to // use IR2Vec. 
- auto VocabResult = MAM.getResult<IR2VecVocabAnalysis>(*F.getParent()); + auto &VocabResult = MAM.getResult<IR2VecVocabAnalysis>(*F.getParent()); (void)VocabResult; return FunctionPropertiesInfo::getFunctionPropertiesInfo(F, FAM); } @@ -106,7 +109,7 @@ protected: } std::unique_ptr<ir2vec::Embedder> createEmbedder(const Function &F) { - auto Emb = ir2vec::Embedder::create(IR2VecKind::Symbolic, F, IR2VecVocab); + auto Emb = ir2vec::Embedder::create(IR2VecKind::Symbolic, F, *IR2VecVocab); EXPECT_TRUE(static_cast<bool>(Emb)); return Emb; } diff --git a/llvm/unittests/Analysis/IR2VecTest.cpp b/llvm/unittests/Analysis/IR2VecTest.cpp index 9f2f6a3..743628f 100644 --- a/llvm/unittests/Analysis/IR2VecTest.cpp +++ b/llvm/unittests/Analysis/IR2VecTest.cpp @@ -295,7 +295,7 @@ TEST(IR2VecTest, ZeroDimensionEmbedding) { // Fixture for IR2Vec tests requiring IR setup. class IR2VecTestFixture : public ::testing::Test { protected: - Vocabulary V; + Vocabulary *V; LLVMContext Ctx; std::unique_ptr<Module> M; Function *F = nullptr; @@ -304,7 +304,7 @@ protected: Instruction *RetInst = nullptr; void SetUp() override { - V = Vocabulary(Vocabulary::createDummyVocabForTest(2)); + V = new Vocabulary(Vocabulary::createDummyVocabForTest(2)); // Setup IR M = std::make_unique<Module>("TestM", Ctx); @@ -322,7 +322,7 @@ protected: }; TEST_F(IR2VecTestFixture, GetInstVecMap_Symbolic) { - auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, V); + auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, *V); ASSERT_TRUE(static_cast<bool>(Emb)); const auto &InstMap = Emb->getInstVecMap(); @@ -341,7 +341,7 @@ TEST_F(IR2VecTestFixture, GetInstVecMap_Symbolic) { } TEST_F(IR2VecTestFixture, GetInstVecMap_FlowAware) { - auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, V); + auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, *V); ASSERT_TRUE(static_cast<bool>(Emb)); const auto &InstMap = Emb->getInstVecMap(); @@ -358,7 +358,7 @@ TEST_F(IR2VecTestFixture, GetInstVecMap_FlowAware) { } 
TEST_F(IR2VecTestFixture, GetBBVecMap_Symbolic) { - auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, V); + auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, *V); ASSERT_TRUE(static_cast<bool>(Emb)); const auto &BBMap = Emb->getBBVecMap(); @@ -373,7 +373,7 @@ TEST_F(IR2VecTestFixture, GetBBVecMap_Symbolic) { } TEST_F(IR2VecTestFixture, GetBBVecMap_FlowAware) { - auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, V); + auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, *V); ASSERT_TRUE(static_cast<bool>(Emb)); const auto &BBMap = Emb->getBBVecMap(); @@ -388,7 +388,7 @@ TEST_F(IR2VecTestFixture, GetBBVecMap_FlowAware) { } TEST_F(IR2VecTestFixture, GetBBVector_Symbolic) { - auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, V); + auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, *V); ASSERT_TRUE(static_cast<bool>(Emb)); const auto &BBVec = Emb->getBBVector(*BB); @@ -398,7 +398,7 @@ TEST_F(IR2VecTestFixture, GetBBVector_Symbolic) { } TEST_F(IR2VecTestFixture, GetBBVector_FlowAware) { - auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, V); + auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, *V); ASSERT_TRUE(static_cast<bool>(Emb)); const auto &BBVec = Emb->getBBVector(*BB); @@ -408,7 +408,7 @@ TEST_F(IR2VecTestFixture, GetBBVector_FlowAware) { } TEST_F(IR2VecTestFixture, GetFunctionVector_Symbolic) { - auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, V); + auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, *V); ASSERT_TRUE(static_cast<bool>(Emb)); const auto &FuncVec = Emb->getFunctionVector(); @@ -420,7 +420,7 @@ TEST_F(IR2VecTestFixture, GetFunctionVector_Symbolic) { } TEST_F(IR2VecTestFixture, GetFunctionVector_FlowAware) { - auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, V); + auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, *V); ASSERT_TRUE(static_cast<bool>(Emb)); const auto &FuncVec = Emb->getFunctionVector(); @@ -435,6 +435,7 @@ static constexpr unsigned MaxOpcodes = Vocabulary::MaxOpcodes; static constexpr 
unsigned MaxTypeIDs = Vocabulary::MaxTypeIDs; static constexpr unsigned MaxCanonicalTypeIDs = Vocabulary::MaxCanonicalTypeIDs; static constexpr unsigned MaxOperands = Vocabulary::MaxOperandKinds; +static constexpr unsigned MaxPredicateKinds = Vocabulary::MaxPredicateKinds; // Mapping between LLVM Type::TypeID tokens and Vocabulary::CanonicalTypeID // names and their canonical string keys. @@ -460,9 +461,13 @@ TEST(IR2VecVocabularyTest, DummyVocabTest) { EXPECT_EQ(Emb.size(), Dim); // Should have the correct total number of embeddings - EXPECT_EQ(VocabVecSize, MaxOpcodes + MaxCanonicalTypeIDs + MaxOperands); + EXPECT_EQ(VocabVecSize, MaxOpcodes + MaxCanonicalTypeIDs + MaxOperands + + MaxPredicateKinds); - auto ExpectedVocab = VocabVec; + // Collect embeddings for later comparison before moving VocabVec + std::vector<Embedding> ExpectedVocab; + for (const auto &Emb : VocabVec) + ExpectedVocab.push_back(Emb); IR2VecVocabAnalysis VocabAnalysis(std::move(VocabVec)); LLVMContext TestCtx; @@ -480,17 +485,17 @@ TEST(IR2VecVocabularyTest, DummyVocabTest) { } TEST(IR2VecVocabularyTest, SlotIdxMapping) { - // Test getSlotIndex for Opcodes + // Test getIndex for Opcodes #define EXPECT_OPCODE_SLOT(NUM, OPCODE, CLASS) \ - EXPECT_EQ(Vocabulary::getSlotIndex(NUM), static_cast<unsigned>(NUM - 1)); + EXPECT_EQ(Vocabulary::getIndex(NUM), static_cast<unsigned>(NUM - 1)); #define HANDLE_INST(NUM, OPCODE, CLASS) EXPECT_OPCODE_SLOT(NUM, OPCODE, CLASS) #include "llvm/IR/Instruction.def" #undef HANDLE_INST #undef EXPECT_OPCODE_SLOT - // Test getSlotIndex for Types + // Test getIndex for Types #define EXPECT_TYPE_SLOT(TypeIDTok, CanonEnum, CanonStr) \ - EXPECT_EQ(Vocabulary::getSlotIndex(Type::TypeIDTok), \ + EXPECT_EQ(Vocabulary::getIndex(Type::TypeIDTok), \ MaxOpcodes + static_cast<unsigned>( \ Vocabulary::CanonicalTypeID::CanonEnum)); @@ -498,7 +503,7 @@ TEST(IR2VecVocabularyTest, SlotIdxMapping) { #undef EXPECT_TYPE_SLOT - // Test getSlotIndex for Value operands + // Test getIndex for 
Value operands LLVMContext Ctx; Module M("TestM", Ctx); FunctionType *FTy = @@ -508,40 +513,59 @@ TEST(IR2VecVocabularyTest, SlotIdxMapping) { #define EXPECTED_VOCAB_OPERAND_SLOT(X) \ MaxOpcodes + MaxCanonicalTypeIDs + static_cast<unsigned>(X) // Test Function operand - EXPECT_EQ(Vocabulary::getSlotIndex(*F), + EXPECT_EQ(Vocabulary::getIndex(*F), EXPECTED_VOCAB_OPERAND_SLOT(Vocabulary::OperandKind::FunctionID)); // Test Constant operand Constant *C = ConstantInt::get(Type::getInt32Ty(Ctx), 42); - EXPECT_EQ(Vocabulary::getSlotIndex(*C), + EXPECT_EQ(Vocabulary::getIndex(*C), EXPECTED_VOCAB_OPERAND_SLOT(Vocabulary::OperandKind::ConstantID)); // Test Pointer operand BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F); AllocaInst *PtrVal = new AllocaInst(Type::getInt32Ty(Ctx), 0, "ptr", BB); - EXPECT_EQ(Vocabulary::getSlotIndex(*PtrVal), + EXPECT_EQ(Vocabulary::getIndex(*PtrVal), EXPECTED_VOCAB_OPERAND_SLOT(Vocabulary::OperandKind::PointerID)); // Test Variable operand (function argument) Argument *Arg = F->getArg(0); - EXPECT_EQ(Vocabulary::getSlotIndex(*Arg), + EXPECT_EQ(Vocabulary::getIndex(*Arg), EXPECTED_VOCAB_OPERAND_SLOT(Vocabulary::OperandKind::VariableID)); #undef EXPECTED_VOCAB_OPERAND_SLOT + + // Test getIndex for predicates +#define EXPECTED_VOCAB_PREDICATE_SLOT(X) \ + MaxOpcodes + MaxCanonicalTypeIDs + MaxOperands + static_cast<unsigned>(X) + for (unsigned P = CmpInst::FIRST_FCMP_PREDICATE; + P <= CmpInst::LAST_FCMP_PREDICATE; ++P) { + CmpInst::Predicate Pred = static_cast<CmpInst::Predicate>(P); + unsigned ExpectedIdx = + EXPECTED_VOCAB_PREDICATE_SLOT((P - CmpInst::FIRST_FCMP_PREDICATE)); + EXPECT_EQ(Vocabulary::getIndex(Pred), ExpectedIdx); + } + auto ICMP_Start = CmpInst::LAST_FCMP_PREDICATE + 1; + for (unsigned P = CmpInst::FIRST_ICMP_PREDICATE; + P <= CmpInst::LAST_ICMP_PREDICATE; ++P) { + CmpInst::Predicate Pred = static_cast<CmpInst::Predicate>(P); + unsigned ExpectedIdx = EXPECTED_VOCAB_PREDICATE_SLOT( + ICMP_Start + P - 
CmpInst::FIRST_ICMP_PREDICATE); + EXPECT_EQ(Vocabulary::getIndex(Pred), ExpectedIdx); + } +#undef EXPECTED_VOCAB_PREDICATE_SLOT } #if GTEST_HAS_DEATH_TEST #ifndef NDEBUG TEST(IR2VecVocabularyTest, NumericIDMapInvalidInputs) { // Test invalid opcode IDs - EXPECT_DEATH(Vocabulary::getSlotIndex(0u), "Invalid opcode"); - EXPECT_DEATH(Vocabulary::getSlotIndex(MaxOpcodes + 1), "Invalid opcode"); + EXPECT_DEATH(Vocabulary::getIndex(0u), "Invalid opcode"); + EXPECT_DEATH(Vocabulary::getIndex(MaxOpcodes + 1), "Invalid opcode"); // Test invalid type IDs - EXPECT_DEATH(Vocabulary::getSlotIndex(static_cast<Type::TypeID>(MaxTypeIDs)), + EXPECT_DEATH(Vocabulary::getIndex(static_cast<Type::TypeID>(MaxTypeIDs)), + "Invalid type ID"); + EXPECT_DEATH(Vocabulary::getIndex(static_cast<Type::TypeID>(MaxTypeIDs + 10)), "Invalid type ID"); - EXPECT_DEATH( - Vocabulary::getSlotIndex(static_cast<Type::TypeID>(MaxTypeIDs + 10)), - "Invalid type ID"); } #endif // NDEBUG #endif // GTEST_HAS_DEATH_TEST @@ -551,7 +575,7 @@ TEST(IR2VecVocabularyTest, StringKeyGeneration) { EXPECT_EQ(Vocabulary::getStringKey(12), "Add"); #define EXPECT_OPCODE(NUM, OPCODE, CLASS) \ - EXPECT_EQ(Vocabulary::getStringKey(Vocabulary::getSlotIndex(NUM)), \ + EXPECT_EQ(Vocabulary::getStringKey(Vocabulary::getIndex(NUM)), \ Vocabulary::getVocabKeyForOpcode(NUM)); #define HANDLE_INST(NUM, OPCODE, CLASS) EXPECT_OPCODE(NUM, OPCODE, CLASS) #include "llvm/IR/Instruction.def" @@ -569,6 +593,7 @@ TEST(IR2VecVocabularyTest, StringKeyGeneration) { #undef EXPECT_CANONICAL_TYPE_NAME + // Verify OperandKind -> string mapping #define HANDLE_OPERAND_KINDS(X) \ X(FunctionID, "Function") \ X(PointerID, "Pointer") \ @@ -592,6 +617,28 @@ TEST(IR2VecVocabularyTest, StringKeyGeneration) { Vocabulary::getStringKey(MaxOpcodes + MaxCanonicalTypeIDs + 1); EXPECT_EQ(FuncArgKey, "Function"); EXPECT_EQ(PtrArgKey, "Pointer"); + +// Verify PredicateKind -> string mapping +#define EXPECT_PREDICATE_KIND(PredNum, PredPos, PredKind) \ + do { \ + 
std::string PredStr = \ + std::string(PredKind) + "_" + \ + CmpInst::getPredicateName(static_cast<CmpInst::Predicate>(PredNum)) \ + .str(); \ + unsigned Pos = MaxOpcodes + MaxCanonicalTypeIDs + MaxOperands + PredPos; \ + EXPECT_EQ(Vocabulary::getStringKey(Pos), PredStr); \ + } while (0) + + for (unsigned P = CmpInst::FIRST_FCMP_PREDICATE; + P <= CmpInst::LAST_FCMP_PREDICATE; ++P) + EXPECT_PREDICATE_KIND(P, P - CmpInst::FIRST_FCMP_PREDICATE, "FCMP"); + + auto ICMP_Pos = CmpInst::LAST_FCMP_PREDICATE + 1; + for (unsigned P = CmpInst::FIRST_ICMP_PREDICATE; + P <= CmpInst::LAST_ICMP_PREDICATE; ++P) + EXPECT_PREDICATE_KIND(P, ICMP_Pos++, "ICMP"); + +#undef EXPECT_PREDICATE_KIND } TEST(IR2VecVocabularyTest, VocabularyDimensions) { @@ -627,10 +674,12 @@ TEST(IR2VecVocabularyTest, InvalidAccess) { #endif // GTEST_HAS_DEATH_TEST TEST(IR2VecVocabularyTest, TypeIDStringKeyMapping) { + Vocabulary V = Vocabulary(Vocabulary::createDummyVocabForTest()); #define EXPECT_TYPE_TO_CANONICAL(TypeIDTok, CanonEnum, CanonStr) \ - EXPECT_EQ( \ - Vocabulary::getStringKey(Vocabulary::getSlotIndex(Type::TypeIDTok)), \ - CanonStr); + do { \ + unsigned FlatIdx = V.getIndex(Type::TypeIDTok); \ + EXPECT_EQ(Vocabulary::getStringKey(FlatIdx), CanonStr); \ + } while (0); IR2VEC_HANDLE_TYPE_BIMAP(EXPECT_TYPE_TO_CANONICAL) @@ -638,14 +687,20 @@ TEST(IR2VecVocabularyTest, TypeIDStringKeyMapping) { } TEST(IR2VecVocabularyTest, InvalidVocabularyConstruction) { - std::vector<Embedding> InvalidVocab; - InvalidVocab.push_back(Embedding(2, 1.0)); - InvalidVocab.push_back(Embedding(2, 2.0)); - - Vocabulary V(std::move(InvalidVocab)); + // Test 1: Create invalid VocabStorage with insufficient sections + std::vector<std::vector<Embedding>> InvalidSectionData; + // Only add one section with 2 embeddings, but the vocabulary needs 4 sections + std::vector<Embedding> Section1; + Section1.push_back(Embedding(2, 1.0)); + Section1.push_back(Embedding(2, 2.0)); + InvalidSectionData.push_back(std::move(Section1)); + + 
VocabStorage InvalidStorage(std::move(InvalidSectionData)); + Vocabulary V(std::move(InvalidStorage)); EXPECT_FALSE(V.isValid()); { + // Test 2: Default-constructed vocabulary should be invalid Vocabulary InvalidResult; EXPECT_FALSE(InvalidResult.isValid()); #if GTEST_HAS_DEATH_TEST @@ -656,4 +711,265 @@ TEST(IR2VecVocabularyTest, InvalidVocabularyConstruction) { } } +TEST(VocabStorageTest, DefaultConstructor) { + VocabStorage storage; + + EXPECT_EQ(storage.size(), 0u); + EXPECT_EQ(storage.getNumSections(), 0u); + EXPECT_EQ(storage.getDimension(), 0u); + EXPECT_FALSE(storage.isValid()); + + // Test iterators on empty storage + EXPECT_EQ(storage.begin(), storage.end()); +} + +TEST(VocabStorageTest, BasicConstruction) { + // Create test data with 3 sections + std::vector<std::vector<Embedding>> sectionData; + + // Section 0: 2 embeddings of dimension 3 + std::vector<Embedding> section0; + section0.emplace_back(std::vector<double>{1.0, 2.0, 3.0}); + section0.emplace_back(std::vector<double>{4.0, 5.0, 6.0}); + sectionData.push_back(std::move(section0)); + + // Section 1: 1 embedding of dimension 3 + std::vector<Embedding> section1; + section1.emplace_back(std::vector<double>{7.0, 8.0, 9.0}); + sectionData.push_back(std::move(section1)); + + // Section 2: 3 embeddings of dimension 3 + std::vector<Embedding> section2; + section2.emplace_back(std::vector<double>{10.0, 11.0, 12.0}); + section2.emplace_back(std::vector<double>{13.0, 14.0, 15.0}); + section2.emplace_back(std::vector<double>{16.0, 17.0, 18.0}); + sectionData.push_back(std::move(section2)); + + VocabStorage storage(std::move(sectionData)); + + EXPECT_EQ(storage.size(), 6u); // Total: 2 + 1 + 3 = 6 + EXPECT_EQ(storage.getNumSections(), 3u); + EXPECT_EQ(storage.getDimension(), 3u); + EXPECT_TRUE(storage.isValid()); +} + +TEST(VocabStorageTest, SectionAccess) { + // Create test data + std::vector<std::vector<Embedding>> sectionData; + + std::vector<Embedding> section0; + 
section0.emplace_back(std::vector<double>{1.0, 2.0}); + section0.emplace_back(std::vector<double>{3.0, 4.0}); + sectionData.push_back(std::move(section0)); + + std::vector<Embedding> section1; + section1.emplace_back(std::vector<double>{5.0, 6.0}); + sectionData.push_back(std::move(section1)); + + VocabStorage storage(std::move(sectionData)); + + // Test section access + EXPECT_EQ(storage[0].size(), 2u); + EXPECT_EQ(storage[1].size(), 1u); + + // Test embedding values + EXPECT_THAT(storage[0][0].getData(), ElementsAre(1.0, 2.0)); + EXPECT_THAT(storage[0][1].getData(), ElementsAre(3.0, 4.0)); + EXPECT_THAT(storage[1][0].getData(), ElementsAre(5.0, 6.0)); +} + +#if GTEST_HAS_DEATH_TEST +#ifndef NDEBUG +TEST(VocabStorageTest, InvalidSectionAccess) { + std::vector<std::vector<Embedding>> sectionData; + std::vector<Embedding> section0; + section0.emplace_back(std::vector<double>{1.0, 2.0}); + sectionData.push_back(std::move(section0)); + + VocabStorage storage(std::move(sectionData)); + + EXPECT_DEATH(storage[1], "Invalid section ID"); + EXPECT_DEATH(storage[10], "Invalid section ID"); +} + +TEST(VocabStorageTest, EmptySection) { + std::vector<std::vector<Embedding>> sectionData; + std::vector<Embedding> emptySection; // Empty section + sectionData.push_back(std::move(emptySection)); + + std::vector<Embedding> validSection; + validSection.emplace_back(std::vector<double>{1.0}); + sectionData.push_back(std::move(validSection)); + + EXPECT_DEATH(VocabStorage(std::move(sectionData)), + "Vocabulary section is empty"); +} + +TEST(VocabStorageTest, EmptyMiddleSection) { + std::vector<std::vector<Embedding>> sectionData; + + // Valid first section + std::vector<Embedding> validSection1; + validSection1.emplace_back(std::vector<double>{1.0}); + sectionData.push_back(std::move(validSection1)); + + // Empty middle section + std::vector<Embedding> emptySection; + sectionData.push_back(std::move(emptySection)); + + // Valid last section + std::vector<Embedding> validSection2; + 
validSection2.emplace_back(std::vector<double>{2.0}); + sectionData.push_back(std::move(validSection2)); + + EXPECT_DEATH(VocabStorage(std::move(sectionData)), + "Vocabulary section is empty"); +} + +TEST(VocabStorageTest, NoSections) { + std::vector<std::vector<Embedding>> sectionData; // No sections + + EXPECT_DEATH(VocabStorage(std::move(sectionData)), + "Vocabulary has no sections"); +} + +TEST(VocabStorageTest, MismatchedDimensionsAcrossSections) { + std::vector<std::vector<Embedding>> sectionData; + + // Section 0: embeddings with dimension 2 + std::vector<Embedding> section0; + section0.emplace_back(std::vector<double>{1.0, 2.0}); + section0.emplace_back(std::vector<double>{3.0, 4.0}); + sectionData.push_back(std::move(section0)); + + // Section 1: embedding with dimension 3 (mismatch!) + std::vector<Embedding> section1; + section1.emplace_back(std::vector<double>{5.0, 6.0, 7.0}); + sectionData.push_back(std::move(section1)); + + EXPECT_DEATH(VocabStorage(std::move(sectionData)), + "All embeddings must have the same dimension"); +} + +TEST(VocabStorageTest, MismatchedDimensionsWithinSection) { + std::vector<std::vector<Embedding>> sectionData; + + // Section 0: first embedding with dimension 2, second with dimension 3 + std::vector<Embedding> section0; + section0.emplace_back(std::vector<double>{1.0, 2.0}); + section0.emplace_back(std::vector<double>{3.0, 4.0, 5.0}); // Mismatch! 
+ sectionData.push_back(std::move(section0)); + + EXPECT_DEATH(VocabStorage(std::move(sectionData)), + "All embeddings must have the same dimension"); +} +#endif // NDEBUG +#endif // GTEST_HAS_DEATH_TEST + +TEST(VocabStorageTest, IteratorBasics) { + std::vector<std::vector<Embedding>> sectionData; + + std::vector<Embedding> section0; + section0.emplace_back(std::vector<double>{1.0, 2.0}); + section0.emplace_back(std::vector<double>{3.0, 4.0}); + sectionData.push_back(std::move(section0)); + + std::vector<Embedding> section1; + section1.emplace_back(std::vector<double>{5.0, 6.0}); + sectionData.push_back(std::move(section1)); + + VocabStorage storage(std::move(sectionData)); + + // Test iterator basics + auto it = storage.begin(); + auto end = storage.end(); + + EXPECT_NE(it, end); + + // Check first embedding + EXPECT_THAT((*it).getData(), ElementsAre(1.0, 2.0)); + + // Advance to second embedding + ++it; + EXPECT_NE(it, end); + EXPECT_THAT((*it).getData(), ElementsAre(3.0, 4.0)); + + // Advance to third embedding (in section 1) + ++it; + EXPECT_NE(it, end); + EXPECT_THAT((*it).getData(), ElementsAre(5.0, 6.0)); + + // Advance past the end + ++it; + EXPECT_EQ(it, end); +} + +TEST(VocabStorageTest, IteratorTraversal) { + std::vector<std::vector<Embedding>> sectionData; + + // Section 0: 2 embeddings + std::vector<Embedding> section0; + section0.emplace_back(std::vector<double>{10.0}); + section0.emplace_back(std::vector<double>{20.0}); + sectionData.push_back(std::move(section0)); + + // Section 1: 1 embedding + std::vector<Embedding> section1; + section1.emplace_back(std::vector<double>{25.0}); + sectionData.push_back(std::move(section1)); + + // Section 2: 3 embeddings + std::vector<Embedding> section2; + section2.emplace_back(std::vector<double>{30.0}); + section2.emplace_back(std::vector<double>{40.0}); + section2.emplace_back(std::vector<double>{50.0}); + sectionData.push_back(std::move(section2)); + + VocabStorage storage(std::move(sectionData)); + + // 
Collect all values using iterator + std::vector<double> values; + for (const auto &emb : storage) { + EXPECT_EQ(emb.size(), 1u); + values.push_back(emb[0]); + } + + // Should get all embeddings from all sections + EXPECT_THAT(values, ElementsAre(10.0, 20.0, 25.0, 30.0, 40.0, 50.0)); +} + +TEST(VocabStorageTest, IteratorComparison) { + std::vector<std::vector<Embedding>> sectionData; + std::vector<Embedding> section0; + section0.emplace_back(std::vector<double>{1.0}); + section0.emplace_back(std::vector<double>{2.0}); + sectionData.push_back(std::move(section0)); + + VocabStorage storage(std::move(sectionData)); + + auto it1 = storage.begin(); + auto it2 = storage.begin(); + auto end = storage.end(); + + // Test equality + EXPECT_EQ(it1, it2); + EXPECT_NE(it1, end); + + // Advance one iterator + ++it1; + EXPECT_NE(it1, it2); + EXPECT_NE(it1, end); + + // Advance second iterator to match + ++it2; + EXPECT_EQ(it1, it2); + + // Advance both to end + ++it1; + ++it2; + EXPECT_EQ(it1, end); + EXPECT_EQ(it2, end); + EXPECT_EQ(it1, it2); +} + } // end anonymous namespace diff --git a/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp b/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp index 8c4fd8b..d1c0f64 100644 --- a/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp +++ b/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp @@ -24,7 +24,9 @@ using namespace llvm; using namespace llvm::memprof; +namespace llvm { LLVM_ABI extern cl::opt<bool> MemProfKeepAllNotColdContexts; +} // end namespace llvm namespace { diff --git a/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp b/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp index 45dc50e..c8752c7 100644 --- a/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp +++ b/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp @@ -25,9 +25,10 @@ #include "llvm/Support/raw_ostream.h" #include "gtest/gtest.h" -LLVM_ABI extern llvm::cl::opt<bool> ScalePartialSampleProfileWorkingSetSize; - namespace llvm { + +LLVM_ABI extern cl::opt<bool> 
ScalePartialSampleProfileWorkingSetSize; + namespace { class ProfileSummaryInfoTest : public testing::Test { diff --git a/llvm/unittests/CodeGen/RegAllocScoreTest.cpp b/llvm/unittests/CodeGen/RegAllocScoreTest.cpp index 86bfc7a..432dc93 100644 --- a/llvm/unittests/CodeGen/RegAllocScoreTest.cpp +++ b/llvm/unittests/CodeGen/RegAllocScoreTest.cpp @@ -31,11 +31,14 @@ #include "gtest/gtest.h" using namespace llvm; + +namespace llvm { LLVM_ABI extern cl::opt<double> CopyWeight; LLVM_ABI extern cl::opt<double> LoadWeight; LLVM_ABI extern cl::opt<double> StoreWeight; LLVM_ABI extern cl::opt<double> CheapRematWeight; LLVM_ABI extern cl::opt<double> ExpensiveRematWeight; +} // namespace llvm namespace { // Include helper functions to ease the manipulation of MachineFunctions. diff --git a/llvm/unittests/Frontend/HLSLRootSignatureDumpTest.cpp b/llvm/unittests/Frontend/HLSLRootSignatureDumpTest.cpp index 1eb03f1..451c376 100644 --- a/llvm/unittests/Frontend/HLSLRootSignatureDumpTest.cpp +++ b/llvm/unittests/Frontend/HLSLRootSignatureDumpTest.cpp @@ -266,7 +266,8 @@ TEST(HLSLRootSignatureTest, DefaultStaticSamplerDump) { "minLOD = 0.000000e+00, " "maxLOD = 3.402823e+38, " "space = 0, " - "visibility = All" + "visibility = All, " + "flags = None" ")"; EXPECT_EQ(Out, Expected); } @@ -287,6 +288,7 @@ TEST(HLSLRootSignatureTest, DefinedStaticSamplerDump) { Sampler.MaxLOD = 32.0f; Sampler.Space = 7; Sampler.Visibility = llvm::dxbc::ShaderVisibility::Domain; + Sampler.Flags = llvm::dxbc::StaticSamplerFlags::NonNormalizedCoordinates; std::string Out; llvm::raw_string_ostream OS(Out); @@ -305,7 +307,8 @@ TEST(HLSLRootSignatureTest, DefinedStaticSamplerDump) { "minLOD = 1.000000e+00, " "maxLOD = 3.200000e+01, " "space = 7, " - "visibility = Domain" + "visibility = Domain, " + "flags = NonNormalizedCoordinates" ")"; EXPECT_EQ(Out, Expected); } diff --git a/llvm/unittests/ProfileData/MemProfTest.cpp b/llvm/unittests/ProfileData/MemProfTest.cpp index abe36bc..6ea951e 100644 --- 
a/llvm/unittests/ProfileData/MemProfTest.cpp +++ b/llvm/unittests/ProfileData/MemProfTest.cpp @@ -26,13 +26,14 @@ #include <initializer_list> -LLVM_ABI extern llvm::cl::opt<float> MemProfLifetimeAccessDensityColdThreshold; -LLVM_ABI extern llvm::cl::opt<unsigned> MemProfAveLifetimeColdThreshold; -LLVM_ABI extern llvm::cl::opt<unsigned> +namespace llvm { + +LLVM_ABI extern cl::opt<float> MemProfLifetimeAccessDensityColdThreshold; +LLVM_ABI extern cl::opt<unsigned> MemProfAveLifetimeColdThreshold; +LLVM_ABI extern cl::opt<unsigned> MemProfMinAveLifetimeAccessDensityHotThreshold; -LLVM_ABI extern llvm::cl::opt<bool> MemProfUseHotHints; +LLVM_ABI extern cl::opt<bool> MemProfUseHotHints; -namespace llvm { namespace memprof { namespace { diff --git a/llvm/unittests/Support/Path.cpp b/llvm/unittests/Support/Path.cpp index 888729b..eb649de 100644 --- a/llvm/unittests/Support/Path.cpp +++ b/llvm/unittests/Support/Path.cpp @@ -255,14 +255,14 @@ TEST(Support, Path) { { SmallString<32> Relative("foo.cpp"); - sys::fs::make_absolute("/root", Relative); + path::make_absolute("/root", Relative); Relative[5] = '/'; // Fix up windows paths. ASSERT_EQ("/root/foo.cpp", Relative); } { SmallString<32> Relative("foo.cpp"); - sys::fs::make_absolute("//root", Relative); + path::make_absolute("//root", Relative); Relative[6] = '/'; // Fix up windows paths. 
ASSERT_EQ("//root/foo.cpp", Relative); } diff --git a/llvm/utils/gn/secondary/bolt/lib/Core/BUILD.gn b/llvm/utils/gn/secondary/bolt/lib/Core/BUILD.gn index d4ec80b..c143acf 100644 --- a/llvm/utils/gn/secondary/bolt/lib/Core/BUILD.gn +++ b/llvm/utils/gn/secondary/bolt/lib/Core/BUILD.gn @@ -36,6 +36,7 @@ static_library("Core") { "GDBIndex.cpp", "HashUtilities.cpp", "JumpTable.cpp", + "MCInstUtils.cpp", "MCPlusBuilder.cpp", "ParallelUtilities.cpp", "Relocation.cpp", diff --git a/llvm/utils/gn/secondary/compiler-rt/lib/builtins/sources.gni b/llvm/utils/gn/secondary/compiler-rt/lib/builtins/sources.gni index ac48b94..2ab2a0e 100644 --- a/llvm/utils/gn/secondary/compiler-rt/lib/builtins/sources.gni +++ b/llvm/utils/gn/secondary/compiler-rt/lib/builtins/sources.gni @@ -526,6 +526,13 @@ if (current_cpu == "ve") { ] } +if (current_cpu == "wasm") { + builtins_sources += [ + "wasm/__c_longjmp.S", + "wasm/__cpp_exceptions.S", + ] +} + if (!compiler_rt_exclude_atomic_builtin) { builtins_sources += [ "atomic.c" ] } diff --git a/llvm/utils/gn/secondary/llvm/lib/CAS/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/CAS/BUILD.gn index 2f692d7..c37f43c 100644 --- a/llvm/utils/gn/secondary/llvm/lib/CAS/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/CAS/BUILD.gn @@ -4,9 +4,11 @@ static_library("CAS") { "ActionCache.cpp", "ActionCaches.cpp", "BuiltinCAS.cpp", + "DatabaseFile.cpp", "InMemoryCAS.cpp", "MappedFileRegionArena.cpp", "ObjectStore.cpp", "OnDiskCommon.cpp", + "OnDiskTrieRawHashMap.cpp", ] } diff --git a/llvm/utils/gn/secondary/llvm/unittests/CAS/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/CAS/BUILD.gn index de6de0b..ccb447f 100644 --- a/llvm/utils/gn/secondary/llvm/unittests/CAS/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/unittests/CAS/BUILD.gn @@ -10,6 +10,7 @@ unittest("CASTests") { "ActionCacheTest.cpp", "CASTestConfig.cpp", "ObjectStoreTest.cpp", + "OnDiskTrieRawHashMapTest.cpp", "ProgramTest.cpp", ] } diff --git a/llvm/utils/profcheck-xfail.txt 
b/llvm/utils/profcheck-xfail.txt index 08c8944..77e6ab7 100644 --- a/llvm/utils/profcheck-xfail.txt +++ b/llvm/utils/profcheck-xfail.txt @@ -1414,7 +1414,6 @@ Transforms/SimplifyCFG/merge-cond-stores.ll Transforms/SimplifyCFG/multiple-phis.ll Transforms/SimplifyCFG/PhiBlockMerge.ll Transforms/SimplifyCFG/pr48641.ll -Transforms/SimplifyCFG/preserve-branchweights.ll Transforms/SimplifyCFG/preserve-store-alignment.ll Transforms/SimplifyCFG/rangereduce.ll Transforms/SimplifyCFG/RISCV/select-trunc-i64.ll @@ -1424,7 +1423,6 @@ Transforms/SimplifyCFG/safe-abs.ll Transforms/SimplifyCFG/SimplifyEqualityComparisonWithOnlyPredecessor-domtree-preservation-edgecase.ll Transforms/SimplifyCFG/speculate-blocks.ll Transforms/SimplifyCFG/speculate-derefable-load.ll -Transforms/SimplifyCFG/suppress-zero-branch-weights.ll Transforms/SimplifyCFG/switch_create-custom-dl.ll Transforms/SimplifyCFG/switch_create.ll Transforms/SimplifyCFG/switch-dup-bbs.ll diff --git a/mlir/include/mlir-c/IR.h b/mlir/include/mlir-c/IR.h index 061d762..c464e4d 100644 --- a/mlir/include/mlir-c/IR.h +++ b/mlir/include/mlir-c/IR.h @@ -634,6 +634,10 @@ MLIR_CAPI_EXPORTED MlirContext mlirOperationGetContext(MlirOperation op); /// Gets the location of the operation. MLIR_CAPI_EXPORTED MlirLocation mlirOperationGetLocation(MlirOperation op); +/// Sets the location of the operation. +MLIR_CAPI_EXPORTED void mlirOperationSetLocation(MlirOperation op, + MlirLocation loc); + /// Gets the type id of the operation. /// Returns null if the operation does not have a registered operation /// description. 
diff --git a/mlir/include/mlir/Dialect/Math/Transforms/Passes.td b/mlir/include/mlir/Dialect/Math/Transforms/Passes.td index 4d415ae..48346abd 100644 --- a/mlir/include/mlir/Dialect/Math/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/Math/Transforms/Passes.td @@ -64,4 +64,12 @@ def MathExpandOpsPass : Pass<"math-expand-ops"> { ]; } +def MathSincosFusionPass : Pass<"math-sincos-fusion"> { + let summary = "Fuse sin and cos operations."; + let description = [{ + Fuse sin and cos operations into a sincos operation. + }]; + let dependentDialects = ["math::MathDialect"]; +} + #endif // MLIR_DIALECT_MATH_TRANSFORMS_PASSES diff --git a/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h b/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h index 74e1d28..ba11259 100644 --- a/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h +++ b/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h @@ -9,6 +9,7 @@ #ifndef MLIR_DIALECT_TRANSFORM_TUNEEXTENSION_TUNEEXTENSIONOPS_H #define MLIR_DIALECT_TRANSFORM_TUNEEXTENSION_TUNEEXTENSIONOPS_H +#include "mlir/Dialect/Transform/IR/TransformOps.h" #include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/OpDefinition.h" diff --git a/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.td b/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.td index d68d451..d095659 100644 --- a/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.td +++ b/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.td @@ -11,10 +11,15 @@ include "mlir/Dialect/Transform/IR/TransformDialect.td" include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.td" +include "mlir/Interfaces/ControlFlowInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/IR/BuiltinAttributes.td" include "mlir/IR/CommonAttrConstraints.td" 
+//===----------------------------------------------------------------------===// +// KnobOp +//===----------------------------------------------------------------------===// + def KnobOp : Op<Transform_Dialect, "tune.knob", [ DeclareOpInterfaceMethods<TransformOpInterface>, DeclareOpInterfaceMethods<MemoryEffectsOpInterface>, @@ -52,4 +57,53 @@ def KnobOp : Op<Transform_Dialect, "tune.knob", [ "`<` $name `>` (`=` $selected^ `from`)? `options` `=` $options attr-dict `->` type(results)"; } +//===----------------------------------------------------------------------===// +// AlternativesOp +//===----------------------------------------------------------------------===// + +def AlternativesOp : Op<Transform_Dialect, "tune.alternatives", [ + DeclareOpInterfaceMethods<RegionBranchOpInterface, + ["getEntrySuccessorOperands", "getSuccessorRegions", + "getRegionInvocationBounds"]>, + DeclareOpInterfaceMethods<TransformOpInterface>, + DeclareOpInterfaceMethods<MemoryEffectsOpInterface>, + SingleBlockImplicitTerminator<"::mlir::transform::YieldOp">, + NoRegionArguments +]> { + let summary = "Represents a choice among its regions, i.e. sub-schedules"; + + let description = [{ + This op represents a choice over which of its regions is to be used. + + When `selected_region` is provided, the semantics are that this op is to be + substituted for by the selected region, meaning the region's results become + the results of this op. Without a provided `selected_region`, the semantics + are that this non-deterministic choice is yet to be resolved -- which in + terms of the op's interpreted semantics is a failure. + + The `selected_region` argument is either an `IntegerAttr` or a param holding + an `IntegerAttr`, which should provide a valid zero-based index with respect + to the number of alternatives, i.e. regions. 
+ }]; + let cppNamespace = [{ mlir::transform::tune }]; + + let arguments = (ins Builtin_StringAttr:$name, + OptionalAttr<APIntAttr>:$selected_region_attr, + Optional<TransformParamTypeInterface>:$selected_region_param); + let results = (outs Variadic<Transform_AnyHandleOrParamType>:$results); + let regions = (region VariadicRegion<SizedRegion<1>>:$alternatives); + + let assemblyFormat = [{ + `<` $name `>` + (`selected_region` `=` custom<AlternativesOpSelectedRegion>( + $selected_region_attr, $selected_region_param)^)? + attr-dict-with-keyword + (`:` type($selected_region_param)^)? + (`->` type($results)^)? + regions + }]; + + let hasVerifier = 1; +} + #endif // MLIR_DIALECT_TRANSFORM_TUNEEXTENSION_TUNEEXTENSIONOPS diff --git a/mlir/lib/Bindings/Python/IRCore.cpp b/mlir/lib/Bindings/Python/IRCore.cpp index 83a8757..c20b211 100644 --- a/mlir/lib/Bindings/Python/IRCore.cpp +++ b/mlir/lib/Bindings/Python/IRCore.cpp @@ -3485,15 +3485,21 @@ void mlir::python::populateIRCore(nb::module_ &m) { }, "Shortcut to get an op result if it has only one (throws an error " "otherwise).") - .def_prop_ro( + .def_prop_rw( "location", [](PyOperationBase &self) { PyOperation &operation = self.getOperation(); return PyLocation(operation.getContext(), mlirOperationGetLocation(operation.get())); }, - "Returns the source location the operation was defined or derived " - "from.") + [](PyOperationBase &self, const PyLocation &location) { + PyOperation &operation = self.getOperation(); + mlirOperationSetLocation(operation.get(), location.get()); + }, + nb::for_getter("Returns the source location the operation was " + "defined or derived from."), + nb::for_setter("Sets the source location the operation was defined " + "or derived from.")) .def_prop_ro("parent", [](PyOperationBase &self) -> std::optional<nb::typed<nb::object, PyOperation>> { diff --git a/mlir/lib/CAPI/IR/IR.cpp b/mlir/lib/CAPI/IR/IR.cpp index e9844a7..1881865 100644 --- a/mlir/lib/CAPI/IR/IR.cpp +++ b/mlir/lib/CAPI/IR/IR.cpp @@ 
-656,6 +656,10 @@ MlirLocation mlirOperationGetLocation(MlirOperation op) { return wrap(unwrap(op)->getLoc()); } +void mlirOperationSetLocation(MlirOperation op, MlirLocation loc) { + unwrap(op)->setLoc(unwrap(loc)); +} + MlirTypeID mlirOperationGetTypeID(MlirOperation op) { if (auto info = unwrap(op)->getRegisteredInfo()) return wrap(info->getTypeID()); diff --git a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp index 7cfd6d3..898d76c 100644 --- a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp +++ b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp @@ -1282,6 +1282,13 @@ OpFoldResult arith::MulFOp::fold(FoldAdaptor adaptor) { if (matchPattern(adaptor.getRhs(), m_OneFloat())) return getLhs(); + if (arith::bitEnumContainsAll(getFastmath(), arith::FastMathFlags::nnan | + arith::FastMathFlags::nsz)) { + // mulf(x, 0) -> 0 + if (matchPattern(adaptor.getRhs(), m_AnyZeroFloat())) + return getRhs(); + } + return constFoldBinaryOp<FloatAttr>( adaptor.getOperands(), [](const APFloat &a, const APFloat &b) { return a * b; }); diff --git a/mlir/lib/Dialect/Math/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Math/Transforms/CMakeLists.txt index ff62b51..8899c3a 100644 --- a/mlir/lib/Dialect/Math/Transforms/CMakeLists.txt +++ b/mlir/lib/Dialect/Math/Transforms/CMakeLists.txt @@ -3,6 +3,7 @@ add_mlir_dialect_library(MLIRMathTransforms ExpandOps.cpp ExtendToSupportedTypes.cpp PolynomialApproximation.cpp + SincosFusion.cpp UpliftToFMA.cpp ADDITIONAL_HEADER_DIRS diff --git a/mlir/lib/Dialect/Math/Transforms/SincosFusion.cpp b/mlir/lib/Dialect/Math/Transforms/SincosFusion.cpp new file mode 100644 index 0000000..69407df --- /dev/null +++ b/mlir/lib/Dialect/Math/Transforms/SincosFusion.cpp @@ -0,0 +1,80 @@ +//===- SincosFusion.cpp - Fuse sin/cos into sincos -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/Math/IR/Math.h" +#include "mlir/Dialect/Math/Transforms/Passes.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" + +using namespace mlir; +using namespace mlir::math; + +namespace { + +/// Fuse a math.sin and math.cos in the same block that use the same operand and +/// have identical fastmath flags into a single math.sincos. +struct SincosFusionPattern : OpRewritePattern<math::SinOp> { + using Base::Base; + + LogicalResult matchAndRewrite(math::SinOp sinOp, + PatternRewriter &rewriter) const override { + Value operand = sinOp.getOperand(); + mlir::arith::FastMathFlags sinFastMathFlags = sinOp.getFastmath(); + + math::CosOp cosOp = nullptr; + sinOp->getBlock()->walk([&](math::CosOp op) { + if (op.getOperand() == operand && op.getFastmath() == sinFastMathFlags) { + cosOp = op; + return WalkResult::interrupt(); + } + return WalkResult::advance(); + }); + + if (!cosOp) + return failure(); + + Operation *firstOp = sinOp->isBeforeInBlock(cosOp) ? 
sinOp.getOperation() + : cosOp.getOperation(); + rewriter.setInsertionPoint(firstOp); + + Type elemType = sinOp.getType(); + auto sincos = math::SincosOp::create(rewriter, firstOp->getLoc(), + TypeRange{elemType, elemType}, operand, + sinOp.getFastmathAttr()); + + rewriter.replaceOp(sinOp, sincos.getSin()); + rewriter.replaceOp(cosOp, sincos.getCos()); + return success(); + } +}; + +} // namespace + +namespace mlir::math { +#define GEN_PASS_DEF_MATHSINCOSFUSIONPASS +#include "mlir/Dialect/Math/Transforms/Passes.h.inc" +} // namespace mlir::math + +namespace { + +struct MathSincosFusionPass final + : math::impl::MathSincosFusionPassBase<MathSincosFusionPass> { + using MathSincosFusionPassBase::MathSincosFusionPassBase; + + void runOnOperation() override { + RewritePatternSet patterns(&getContext()); + patterns.add<SincosFusionPattern>(&getContext()); + + GreedyRewriteConfig config; + if (failed( + applyPatternsGreedily(getOperation(), std::move(patterns), config))) + return signalPassFailure(); + } +}; + +} // namespace diff --git a/mlir/lib/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp b/mlir/lib/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp index 842e880..c627158 100644 --- a/mlir/lib/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp +++ b/mlir/lib/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp @@ -6,13 +6,24 @@ // //===----------------------------------------------------------------------===// +#include "mlir/Dialect/Transform/IR/TransformOps.h" #include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.h" +#include "mlir/IR/OpImplementation.h" #include "llvm/Support/Debug.h" #include "mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h" using namespace mlir; +static ParseResult parseAlternativesOpSelectedRegion( + OpAsmParser &parser, IntegerAttr &selectedRegionAttr, + std::optional<OpAsmParser::UnresolvedOperand> &selectedRegionParam); + +static void printAlternativesOpSelectedRegion(OpAsmPrinter &printer, + Operation *op, + 
IntegerAttr selectedRegionAttr, + Value selectedRegionParam); + #define GET_OP_CLASSES #include "mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp.inc" @@ -57,3 +68,176 @@ LogicalResult transform::tune::KnobOp::verify() { return success(); } + +//===----------------------------------------------------------------------===// +// AlternativesOp +//===----------------------------------------------------------------------===// + +static ParseResult parseAlternativesOpSelectedRegion( + OpAsmParser &parser, IntegerAttr &selectedRegionAttr, + std::optional<OpAsmParser::UnresolvedOperand> &selectedRegionParam) { + size_t selectedRegionIdx; + OptionalParseResult attrParseRes = + parser.parseOptionalInteger(selectedRegionIdx); + if (attrParseRes.has_value()) { + if (failed(*attrParseRes)) + return failure(); + + selectedRegionAttr = parser.getBuilder().getIndexAttr(selectedRegionIdx); + return success(); + } + + OpAsmParser::UnresolvedOperand param; + auto paramParseRes = parser.parseOptionalOperand(param); + if (paramParseRes.has_value()) { + if (failed(*paramParseRes)) + return failure(); + + selectedRegionParam = param; + return success(); + } + + return parser.emitError(parser.getCurrentLocation()) + << "expected either an integer attribute or a transform.param operand"; +} + +static void printAlternativesOpSelectedRegion(OpAsmPrinter &printer, + Operation *op, + IntegerAttr selectedRegionAttr, + Value selectedRegionParam) { + if (selectedRegionAttr) + printer << selectedRegionAttr.getValue(); + if (selectedRegionParam) + printer << selectedRegionParam; +} + +OperandRange transform::tune::AlternativesOp::getEntrySuccessorOperands( + RegionBranchPoint point) { + // No operands will be forwarded to the region(s). 
+ return getOperands().slice(0, 0); +} + +void transform::tune::AlternativesOp::getSuccessorRegions( + RegionBranchPoint point, SmallVectorImpl<RegionSuccessor> ®ions) { + if (point.isParent()) + if (auto selectedRegionIdx = getSelectedRegionAttr()) + regions.emplace_back( + &getAlternatives()[selectedRegionIdx->getSExtValue()], + Block::BlockArgListType()); + else + for (Region &alternative : getAlternatives()) + regions.emplace_back(&alternative, Block::BlockArgListType()); + else + regions.emplace_back(getOperation()->getResults()); +} + +void transform::tune::AlternativesOp::getRegionInvocationBounds( + ArrayRef<Attribute> operands, SmallVectorImpl<InvocationBounds> &bounds) { + (void)operands; + bounds.reserve(getNumRegions()); + + if (auto selectedRegionIdx = getSelectedRegionAttr()) { + bounds.resize(getNumRegions(), InvocationBounds(0, 0)); + bounds[selectedRegionIdx->getSExtValue()] = InvocationBounds(1, 1); + } else { + bounds.resize(getNumRegions(), InvocationBounds(0, 1)); + } +} + +void transform::tune::AlternativesOp::getEffects( + SmallVectorImpl<MemoryEffects::EffectInstance> &effects) { + onlyReadsHandle(getSelectedRegionParamMutable(), effects); + producesHandle(getOperation()->getOpResults(), effects); + // TODO: should effects from regions be forwarded? 
+} + +DiagnosedSilenceableFailure +transform::tune::AlternativesOp::apply(transform::TransformRewriter &rewriter, + transform::TransformResults &results, + transform::TransformState &state) { + std::optional<size_t> selectedRegionIdx; + + if (auto selectedRegionAttr = getSelectedRegionAttr()) + selectedRegionIdx = selectedRegionAttr->getSExtValue(); + + if (Value selectedRegionParam = getSelectedRegionParam()) { + ArrayRef<Attribute> associatedAttrs = state.getParams(selectedRegionParam); + IntegerAttr selectedRegionAttr; + if (associatedAttrs.size() != 1 || + !(selectedRegionAttr = dyn_cast<IntegerAttr>(associatedAttrs[0]))) + return emitDefiniteFailure() + << "param should hold exactly one integer attribute, got: " + << associatedAttrs[0]; + selectedRegionIdx = selectedRegionAttr.getValue().getSExtValue(); + } + + if (!selectedRegionIdx) + return emitDefiniteFailure() << "non-deterministic choice " << getName() + << " is only resolved through providing a " + "`selected_region` attr/param"; + + if (*selectedRegionIdx < 0 || *selectedRegionIdx >= getNumRegions()) + return emitDefiniteFailure() + << "'selected_region' attribute/param specifies region at index " + << *selectedRegionIdx << " while op has only " << getNumRegions() + << " regions"; + + Region &selectedRegion = getRegion(*selectedRegionIdx); + auto scope = state.make_region_scope(selectedRegion); + Block &block = selectedRegion.front(); + // Apply the region's ops one by one. + for (Operation &transform : block.without_terminator()) { + DiagnosedSilenceableFailure result = + state.applyTransform(cast<transform::TransformOpInterface>(transform)); + if (result.isDefiniteFailure()) + return result; + + if (result.isSilenceableFailure()) { + for (const auto &res : getResults()) + results.set(res, {}); + return result; + } + } + // Forward the operation mapping for values yielded from the region to the + // values produced by the alternatives op. 
+ transform::detail::forwardTerminatorOperands(&block, state, results); + return DiagnosedSilenceableFailure::success(); +} + +LogicalResult transform::tune::AlternativesOp::verify() { + for (auto *region : getRegions()) { + auto yieldTerminator = + llvm::dyn_cast_if_present<transform::YieldOp>(region->front().back()); + if (!yieldTerminator) + return emitOpError() << "expected '" + << transform::YieldOp::getOperationName() + << "' as terminator"; + + if (yieldTerminator->getNumOperands() != getNumResults()) + return yieldTerminator.emitOpError() + << "expected terminator to have as many operands as the parent op " + "has results"; + + for (auto [i, operandType, resultType] : llvm::zip_equal( + llvm::seq<unsigned>(0, yieldTerminator->getNumOperands()), + yieldTerminator->getOperands().getType(), getResultTypes())) { + if (operandType == resultType) + continue; + return yieldTerminator.emitOpError() + << "the type of the terminator operand #" << i + << " must match the type of the corresponding parent op result (" + << operandType << " vs " << resultType << ")"; + } + } + + if (auto selectedRegionAttr = getSelectedRegionAttr()) { + size_t regionIdx = selectedRegionAttr->getSExtValue(); + if (regionIdx < 0 || regionIdx >= getNumRegions()) + return emitOpError() + << "'selected_region' attribute specifies region at index " + << regionIdx << " while op has only " << getNumRegions() + << " regions"; + } + + return success(); +} diff --git a/mlir/lib/IR/Builders.cpp b/mlir/lib/IR/Builders.cpp index c84e760..8f199b6 100644 --- a/mlir/lib/IR/Builders.cpp +++ b/mlir/lib/IR/Builders.cpp @@ -489,13 +489,6 @@ OpBuilder::tryFold(Operation *op, SmallVectorImpl<Value> &results, SmallVector<OpFoldResult, 4> foldResults; LDBG() << "Trying to fold: " << OpWithFlags(op, OpPrintingFlags().skipRegions()); - if (op->getName().getStringRef() == "vector.extract") { - Operation *parent = op->getParentOp(); - while (parent && parent->getName().getStringRef() != "spirv.func") - parent = 
parent->getParentOp(); - if (parent) - parent->dump(); - } if (failed(op->fold(foldResults))) return cleanupFailure(); diff --git a/mlir/python/mlir/dialects/transform/tune.py b/mlir/python/mlir/dialects/transform/tune.py index f63f88a..b3bfa80 100644 --- a/mlir/python/mlir/dialects/transform/tune.py +++ b/mlir/python/mlir/dialects/transform/tune.py @@ -6,6 +6,9 @@ from typing import Optional, Sequence from ...ir import ( Type, + Value, + Operation, + OpView, Attribute, ArrayAttr, StringAttr, @@ -19,7 +22,10 @@ from .._transform_tune_extension_ops_gen import * from .._transform_tune_extension_ops_gen import _Dialect try: - from .._ods_common import _cext as _ods_cext + from .._ods_common import ( + get_op_result_or_value as _get_op_result_or_value, + _cext as _ods_cext, + ) except ImportError as e: raise RuntimeError("Error loading imports from extension module") from e @@ -36,7 +42,7 @@ class KnobOp(KnobOp): ArrayAttr, Sequence[Union[Attribute, bool, int, float, str]], Attribute ], *, - selected: Optional[Attribute] = None, + selected: Optional[Union[Attribute, bool, int, float, str]] = None, loc=None, ip=None, ): @@ -75,8 +81,62 @@ def knob( ArrayAttr, Sequence[Union[Attribute, bool, int, float, str]], Attribute ], *, - selected: Optional[Attribute] = None, + selected: Optional[Union[Attribute, bool, int, float, str]] = None, loc=None, ip=None, ): return KnobOp(result, name, options, selected=selected, loc=loc, ip=ip) + + +@_ods_cext.register_operation(_Dialect, replace=True) +class AlternativesOp(AlternativesOp): + def __init__( + self, + results: Sequence[Type], + name: Union[StringAttr, str], + num_alternatives: int, + *, + selected_region: Optional[ + Union[int, IntegerAttr, Value, Operation, OpView] + ] = None, + loc=None, + ip=None, + ): + if isinstance(name, str): + name = StringAttr.get(name) + + selected_region_attr = selected_region_param = None + if isinstance(selected_region, IntegerAttr): + selected_region_attr = selected_region + elif 
isinstance(selected_region, int): + selected_region_attr = IntegerAttr.get( + IntegerType.get_signless(32), selected_region + ) + elif isinstance(selected_region, (Value, Operation, OpView)): + selected_region_param = _get_op_result_or_value(selected_region) + + super().__init__( + results, + name, + num_alternatives, + selected_region_attr=selected_region_attr, + selected_region_param=selected_region_param, + loc=loc, + ip=ip, + ) + for region in self.regions: + region.blocks.append() + + +def alternatives( + results: Sequence[Type], + name: Union[StringAttr, str], + num_alternatives: int, + *, + selected_region: Optional[Union[int, IntegerAttr, Value, Operation, OpView]] = None, + loc=None, + ip=None, +): + return AlternativesOp( + results, name, num_alternatives, selected_region=selected_region, loc=loc, ip=ip + ) diff --git a/mlir/test/Dialect/Arith/canonicalize.mlir b/mlir/test/Dialect/Arith/canonicalize.mlir index ca3de3a..2fe0995 100644 --- a/mlir/test/Dialect/Arith/canonicalize.mlir +++ b/mlir/test/Dialect/Arith/canonicalize.mlir @@ -2216,6 +2216,18 @@ func.func @test_mulf1(%arg0 : f32, %arg1 : f32) -> (f32) { return %2 : f32 } +// CHECK-LABEL: @test_mulf2( +func.func @test_mulf2(%arg0 : f32) -> (f32, f32) { + // CHECK-DAG: %[[C0:.+]] = arith.constant 0.000000e+00 : f32 + // CHECK-DAG: %[[C0n:.+]] = arith.constant -0.000000e+00 : f32 + // CHECK-NEXT: return %[[C0]], %[[C0n]] + %c0 = arith.constant 0.0 : f32 + %c0n = arith.constant -0.0 : f32 + %0 = arith.mulf %c0, %arg0 fastmath<nnan,nsz> : f32 + %1 = arith.mulf %c0n, %arg0 fastmath<nnan,nsz> : f32 + return %0, %1 : f32, f32 +} + // ----- // CHECK-LABEL: @test_divf( diff --git a/mlir/test/Dialect/Math/sincos-fusion.mlir b/mlir/test/Dialect/Math/sincos-fusion.mlir new file mode 100644 index 0000000..29fb9f1 --- /dev/null +++ b/mlir/test/Dialect/Math/sincos-fusion.mlir @@ -0,0 +1,86 @@ +// RUN: mlir-opt -math-sincos-fusion %s | FileCheck %s + +// CHECK-LABEL: func.func @sincos_fusion( +// CHECK-SAME: 
%[[ARG0:.*]]: f32, +// CHECK-SAME: %[[ARG1:.*]]: f32) -> (f32, f32, f32, f32) { +// CHECK: %[[VAL_0:.*]], %[[VAL_1:.*]] = math.sincos %[[ARG0]] : f32 +// CHECK: %[[VAL_2:.*]], %[[VAL_3:.*]] = math.sincos %[[ARG1]] : f32 +// CHECK: return %[[VAL_0]], %[[VAL_1]], %[[VAL_3]], %[[VAL_2]] : f32, f32, f32, f32 +// CHECK: } +func.func @sincos_fusion(%arg0 : f32, %arg1 : f32) -> (f32, f32, f32, f32) { + %0 = math.sin %arg0 : f32 + %1 = math.cos %arg0 : f32 + + %2 = math.cos %arg1 : f32 + %3 = math.sin %arg1 : f32 + + func.return %0, %1, %2, %3 : f32, f32, f32, f32 +} + +func.func private @sink(%arg0 : f32) + +// CHECK: func.func private @sink(f32) +// CHECK-LABEL: func.func @sincos_ensure_ssa_dominance( +// CHECK-SAME: %[[ARG0:.*]]: f32, +// CHECK-SAME: %[[ARG1:.*]]: f32) -> (f32, f32, f32, f32) { +// CHECK: %[[VAL_0:.*]], %[[VAL_1:.*]] = math.sincos %[[ARG0]] : f32 +// CHECK: call @sink(%[[VAL_0]]) : (f32) -> () +// CHECK: %[[VAL_2:.*]], %[[VAL_3:.*]] = math.sincos %[[ARG1]] : f32 +// CHECK: call @sink(%[[VAL_3]]) : (f32) -> () +// CHECK: return %[[VAL_0]], %[[VAL_1]], %[[VAL_3]], %[[VAL_2]] : f32, f32, f32, f32 +// CHECK: } +func.func @sincos_ensure_ssa_dominance(%arg0 : f32, %arg1 : f32) -> (f32, f32, f32, f32) { + %0 = math.sin %arg0 : f32 + func.call @sink(%0) : (f32) -> () + %1 = math.cos %arg0 : f32 + %2 = math.cos %arg1 : f32 + func.call @sink(%2) : (f32) -> () + %3 = math.sin %arg1 : f32 + func.return %0, %1, %2, %3 : f32, f32, f32, f32 +} + +// CHECK-LABEL: func.func @sincos_fusion_no_match_fmf( +// CHECK-SAME: %[[ARG0:.*]]: f32) -> (f32, f32) { +// CHECK: %[[VAL_0:.*]] = math.sin %[[ARG0]] fastmath<contract> : f32 +// CHECK: %[[VAL_1:.*]] = math.cos %[[ARG0]] : f32 +// CHECK: return %[[VAL_0]], %[[VAL_1]] : f32, f32 +// CHECK: } +func.func @sincos_fusion_no_match_fmf(%arg0 : f32) -> (f32, f32) { + %0 = math.sin %arg0 fastmath<contract> : f32 + %1 = math.cos %arg0 : f32 + func.return %0, %1 : f32, f32 +} + +// CHECK-LABEL: func.func 
@sincos_no_fusion_different_block( +// CHECK-SAME: %[[ARG0:.*]]: f32, +// CHECK-SAME: %[[ARG1:.*]]: i1) -> f32 { +// CHECK: %[[VAL_0:.*]] = scf.if %[[ARG1]] -> (f32) { +// CHECK: %[[VAL_1:.*]] = math.sin %[[ARG0]] : f32 +// CHECK: scf.yield %[[VAL_1]] : f32 +// CHECK: } else { +// CHECK: %[[VAL_2:.*]] = math.cos %[[ARG0]] : f32 +// CHECK: scf.yield %[[VAL_2]] : f32 +// CHECK: } +// CHECK: return %[[VAL_0]] : f32 +// CHECK: } +func.func @sincos_no_fusion_different_block(%arg0 : f32, %flag : i1) -> f32 { + %0 = scf.if %flag -> f32 { + %s = math.sin %arg0 : f32 + scf.yield %s : f32 + } else { + %c = math.cos %arg0 : f32 + scf.yield %c : f32 + } + func.return %0 : f32 +} + +// CHECK-LABEL: func.func @sincos_fusion_preserve_fastmath( +// CHECK-SAME: %[[ARG0:.*]]: f32) -> (f32, f32) { +// CHECK: %[[VAL_0:.*]], %[[VAL_1:.*]] = math.sincos %[[ARG0]] fastmath<contract> : f32 +// CHECK: return %[[VAL_0]], %[[VAL_1]] : f32, f32 +// CHECK: } +func.func @sincos_fusion_preserve_fastmath(%arg0 : f32) -> (f32, f32) { + %0 = math.sin %arg0 fastmath<contract> : f32 + %1 = math.cos %arg0 fastmath<contract> : f32 + func.return %0, %1 : f32, f32 +} diff --git a/mlir/test/Dialect/Transform/test-tune-extension-invalid.mlir b/mlir/test/Dialect/Transform/test-tune-extension-invalid.mlir index 2e5f433..efc3890 100644 --- a/mlir/test/Dialect/Transform/test-tune-extension-invalid.mlir +++ b/mlir/test/Dialect/Transform/test-tune-extension-invalid.mlir @@ -19,3 +19,88 @@ module attributes {transform.with_named_sequence} { transform.yield } } + +// ----- + +func.func private @f() + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + // expected-error@below {{'selected_region' attribute specifies region at index 2 while op has only 2 regions}} + transform.tune.alternatives<"bifurcation"> selected_region = 2 { + transform.yield + }, { + transform.yield + } + transform.yield + } +} + +// ----- + 
+func.func private @f() + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + %singleton_of_c0 = transform.param.constant [0] -> !transform.any_param + // expected-error@below {{param should hold exactly one integer attribute, got: [0]}} + transform.tune.alternatives<"bifurcation"> selected_region = %singleton_of_c0 : !transform.any_param { + transform.yield + }, { + transform.yield + } + transform.yield + } +} + +// ----- + +func.func private @f() + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + %c0 = transform.param.constant 0 -> !transform.any_param + %c1 = transform.param.constant 1 -> !transform.any_param + %c0_and_c1 = transform.merge_handles %c0, %c1 : !transform.any_param + // expected-error@below {{param should hold exactly one integer attribute}} + transform.tune.alternatives<"bifurcation"> selected_region = %c0_and_c1 : !transform.any_param { + transform.yield + }, { + transform.yield + } + transform.yield + } +} + +// ----- + +func.func private @f() + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + %c2 = transform.param.constant 2 -> !transform.any_param + // expected-error@below {{'selected_region' attribute/param specifies region at index 2 while op has only 2 regions}} + transform.tune.alternatives<"bifurcation"> selected_region = %c2 : !transform.any_param { + transform.yield + }, { + transform.yield + } + transform.yield + } +} + +// ----- + +func.func private @f() + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + // expected-error@below {{non-deterministic choice "bifurcation" is only resolved through providing a `selected_region` attr/param}} + 
transform.tune.alternatives<"bifurcation"> { + transform.yield + }, { + transform.yield + } + transform.yield + } +} diff --git a/mlir/test/Dialect/Transform/test-tune-extension.mlir b/mlir/test/Dialect/Transform/test-tune-extension.mlir index 0a253c6..5da48a2 100644 --- a/mlir/test/Dialect/Transform/test-tune-extension.mlir +++ b/mlir/test/Dialect/Transform/test-tune-extension.mlir @@ -59,3 +59,129 @@ module attributes {transform.with_named_sequence} { transform.yield } } + + +// ----- + +// CHECK-LABEL: schedule_with_two_independent_choices_already_made +func.func @schedule_with_two_independent_choices_already_made( + %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>) + -> tensor<128x128xf32> { +// CHECK-NOT: scf.forall +// CHECK: scf.for +// CHECK-NOT: scf.for +// CHECK: scf.forall +// CHECK-NOT: scf.for +// CHECK: tensor.extract_slice +// CHECK: tensor.extract_slice +// CHECK: tensor.extract_slice +// CHECK: linalg.matmul +// CHECK: scf.forall.in_parallel +// CHECK: tensor.parallel_insert_slice +// CHECK: tensor.insert_slice +// CHECK: scf.yield + %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>) + outs(%arg2: tensor<128x128xf32>) -> tensor<128x128xf32> + return %0 : tensor<128x128xf32> +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0 : (!transform.any_op) -> !transform.any_op + + %tiled_matmul = transform.tune.alternatives<"outer_par_or_seq_tiling"> selected_region = 0 -> !transform.any_op + { // First alternative/region, with index = 0 + %contained_matmul, %loop = transform.structured.tile_using_for %matmul tile_sizes [8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op) + transform.yield %contained_matmul : !transform.any_op + }, { // Second alternative/region, with index = 1 + %contained_matmul, %loop = 
transform.structured.tile_using_forall %matmul tile_sizes [8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op) + transform.yield %contained_matmul : !transform.any_op + } + + transform.tune.alternatives<"inner_par_or_seq_tiling"> selected_region = 1 -> !transform.any_op { + %contained_matmul, %loop = transform.structured.tile_using_for %tiled_matmul tile_sizes [0, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op) + transform.yield %contained_matmul : !transform.any_op + }, { + %contained_matmul, %loop = transform.structured.tile_using_forall %tiled_matmul tile_sizes [0, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op) + transform.yield %contained_matmul : !transform.any_op + } + + transform.yield + } +} + +// ----- + +// CHECK-LABEL: subschedule_with_choice_resolved_in_main_schedule +func.func @subschedule_with_choice_resolved_in_main_schedule( + %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>) + -> tensor<128x128xf32> { +// CHECK-NOT: scf.for +// CHECK: scf.forall +// CHECK-NOT: scf.forall +// CHECK: scf.for +// CHECK-NOT: scf.forall +// CHECK: tensor.extract_slice +// CHECK: tensor.extract_slice +// CHECK: tensor.extract_slice +// CHECK: linalg.matmul +// CHECK: tensor.insert_slice +// CHECK: scf.yield +// CHECK: scf.forall.in_parallel +// CHECK: tensor.parallel_insert_slice + %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>) + outs(%arg2: tensor<128x128xf32>) -> tensor<128x128xf32> + return %0 : tensor<128x128xf32> +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @subschedule_with_embedded_choice(%matmul: !transform.any_op {transform.readonly}, + %par_or_seq: !transform.param<i64> {transform.readonly}, + %tile_size: !transform.param<i64> {transform.readonly}) -> !transform.any_op { + %tiled_matmul = transform.tune.alternatives<"par_or_seq_tiling"> selected_region = %par_or_seq : !transform.param<i64> -> 
!transform.any_op { + %contained_matmul, %loop = transform.structured.tile_using_for %matmul tile_sizes [%tile_size] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op) + transform.yield %contained_matmul : !transform.any_op + }, { + %contained_matmul, %loop = transform.structured.tile_using_forall %matmul tile_sizes [%tile_size] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op) + transform.yield %contained_matmul : !transform.any_op + } + transform.yield %tiled_matmul : !transform.any_op + } + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0 : (!transform.any_op) -> !transform.any_op + %outer_par = transform.param.constant 1 -> !transform.param<i64> + %outer_tile_size = transform.param.constant 32 -> !transform.param<i64> + %inner_seq = transform.tune.knob<"inner_par_or_seq"> = 0 from options = [0, 1] -> !transform.param<i64> + %inner_tile_size = transform.param.constant 8 -> !transform.param<i64> + %tiled_matmul = transform.include @subschedule_with_embedded_choice failures(propagate) (%matmul, %outer_par, %outer_tile_size) : (!transform.any_op, !transform.param<i64>, !transform.param<i64>) -> !transform.any_op + %tiled_tiled_matmul = transform.include @subschedule_with_embedded_choice failures(propagate) (%tiled_matmul, %inner_seq, %inner_tile_size) : (!transform.any_op, !transform.param<i64>, !transform.param<i64>) -> !transform.any_op + transform.yield + } +} + +// ----- + +// CHECK-LABEL: eeny_meeny_miny_moe +func.func private @eeny_meeny_miny_moe() + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0 : (!transform.any_op) -> !transform.any_op + + %tiled_matmul = transform.tune.alternatives<"4way"> 
selected_region = 3 -> !transform.any_param + { // First alternative/region, with index = 0 + %out = transform.param.constant "eeny" -> !transform.any_param + transform.yield %out : !transform.any_param + }, { // Second alternative/region, with index = 1 + %out = transform.param.constant "meeny" -> !transform.any_param + transform.yield %out : !transform.any_param + }, { // Third alternative/region, with index = 2 + %out = transform.param.constant "miny" -> !transform.any_param + transform.yield %out : !transform.any_param + }, { // Fourth alternative/region, with index = 3 + %out = transform.param.constant "moe" -> !transform.any_param + transform.yield %out : !transform.any_param + } + transform.yield + } +}
\ No newline at end of file diff --git a/mlir/test/python/dialects/transform_tune_ext.py b/mlir/test/python/dialects/transform_tune_ext.py index dfb9359..eb2a083 100644 --- a/mlir/test/python/dialects/transform_tune_ext.py +++ b/mlir/test/python/dialects/transform_tune_ext.py @@ -1,21 +1,21 @@ # RUN: %PYTHON %s | FileCheck %s -from mlir.ir import * +from mlir import ir from mlir.dialects import transform from mlir.dialects.transform import tune, debug def run(f): - print("\nTEST:", f.__name__) - with Context(), Location.unknown(): - module = Module.create() - with InsertionPoint(module.body): + print("\n// TEST:", f.__name__) + with ir.Context(), ir.Location.unknown(): + module = ir.Module.create() + with ir.InsertionPoint(module.body): sequence = transform.SequenceOp( transform.FailurePropagationMode.Propagate, [], transform.AnyOpType.get(), ) - with InsertionPoint(sequence.body): + with ir.InsertionPoint(sequence.body): f(sequence.bodyTarget) transform.YieldOp() print(module) @@ -29,10 +29,10 @@ def testKnobOp(target): # CHECK: %[[HEADS_OR_TAILS:.*]] = transform.tune.knob<"coin"> options = [true, false] -> !transform.any_param heads_or_tails = tune.KnobOp( - result=any_param, name=StringAttr.get("coin"), options=[True, False] + result=any_param, name=ir.StringAttr.get("coin"), options=[True, False] ) # CHECK: transform.tune.knob<"animal"> options = ["cat", "dog", unit] -> !transform.any_param - tune.KnobOp(any_param, name="animal", options=["cat", "dog", UnitAttr.get()]) + tune.KnobOp(any_param, name="animal", options=["cat", "dog", ir.UnitAttr.get()]) # CHECK: transform.tune.knob<"tile_size"> options = [2, 4, 8, 16, 24, 32] -> !transform.any_param tune.KnobOp(any_param, "tile_size", [2, 4, 8, 16, 24, 32]) # CHECK: transform.tune.knob<"magic_value"> options = [2.000000e+00, 2.250000e+00, 2.500000e+00, 2.750000e+00, 3.000000e+00] -> !transform.any_param @@ -45,7 +45,10 @@ def testKnobOp(target): heads = tune.KnobOp(any_param, "coin", options=[True, False], 
selected=True) # CHECK: transform.tune.knob<"animal"> = "dog" from options = ["cat", "dog", unit] -> !transform.any_param tune.KnobOp( - any_param, name="animal", options=["cat", "dog", UnitAttr.get()], selected="dog" + any_param, + name="animal", + options=["cat", "dog", ir.UnitAttr.get()], + selected="dog", ) # CHECK: transform.tune.knob<"tile_size"> = 8 : i64 from options = [2, 4, 8, 16, 24, 32] -> !transform.any_param tune.KnobOp(any_param, "tile_size", [2, 4, 8, 16, 24, 32], selected=8) @@ -57,16 +60,90 @@ def testKnobOp(target): # CHECK: transform.tune.knob<"range_as_a_dict"> = 4 : i64 from options = {start = 2 : i64, step = 2 : i64, stop = 16 : i64} -> !transform.any_param # NB: Membership of `selected` in non-ArrayAttr `options` is _not_ verified. - i64 = IntegerType.get_signless(64) + i64 = ir.IntegerType.get_signless(64) tune.knob( any_param, "range_as_a_dict", - DictAttr.get( + ir.DictAttr.get( { - "start": IntegerAttr.get(i64, 2), - "stop": IntegerAttr.get(i64, 16), - "step": IntegerAttr.get(i64, 2), + "start": ir.IntegerAttr.get(i64, 2), + "stop": ir.IntegerAttr.get(i64, 16), + "step": ir.IntegerAttr.get(i64, 2), } ), selected=4, ) + + +# CHECK-LABEL: TEST: testAlternativesOp +@run +def testAlternativesOp(target): + any_param = transform.AnyParamType.get() + + # CHECK: %[[LEFT_OR_RIGHT_OUTCOME:.*]] = transform.tune.alternatives<"left_or_right"> -> !transform.any_param { + left_or_right = tune.AlternativesOp( + [transform.AnyParamType.get()], "left_or_right", 2 + ) + idx_for_left, idx_for_right = 0, 1 + with ir.InsertionPoint(left_or_right.alternatives[idx_for_left].blocks[0]): + # CHECK: %[[C0:.*]] = transform.param.constant 0 + i32_0 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 0) + c0 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_0) + # CHECK: transform.yield %[[C0]] + transform.yield_(c0) + # CHECK-NEXT: }, { + with ir.InsertionPoint(left_or_right.alternatives[idx_for_right].blocks[0]): + # CHECK: %[[C1:.*]] = 
transform.param.constant 1 + i32_1 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 1) + c1 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_1) + # CHECK: transform.yield %[[C1]] + transform.yield_(c1) + # CHECK-NEXT: } + outcome_of_left_or_right_decision = left_or_right.results[0] + + # CHECK: transform.tune.alternatives<"fork_in_the_road"> selected_region = 0 -> !transform.any_param { + fork_in_the_road = tune.AlternativesOp( + [transform.AnyParamType.get()], "fork_in_the_road", 2, selected_region=0 + ) + with ir.InsertionPoint(fork_in_the_road.alternatives[idx_for_left].blocks[0]): + # CHECK: %[[C0:.*]] = transform.param.constant 0 + i32_0 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 0) + c0 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_0) + # CHECK: transform.yield %[[C0]] + transform.yield_(c0) + # CHECK-NEXT: }, { + with ir.InsertionPoint(fork_in_the_road.alternatives[idx_for_right].blocks[0]): + # CHECK: %[[C1:.*]] = transform.param.constant 1 + i32_1 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 1) + c1 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_1) + # CHECK: transform.yield %[[C1]] + transform.yield_(c1) + # CHECK-NEXT: } + + # CHECK: transform.tune.alternatives<"left_or_right_as_before"> selected_region = %[[LEFT_OR_RIGHT_OUTCOME]] : !transform.any_param { + left_or_right_as_before = tune.AlternativesOp( + [], + "left_or_right_as_before", + 2, + selected_region=outcome_of_left_or_right_decision, + ) + with ir.InsertionPoint( + left_or_right_as_before.alternatives[idx_for_left].blocks[0] + ): + # CHECK: transform.param.constant 1337 + i32_1337 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 1337) + c1337 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_1337) + # CHECK: transform.debug.emit_param_as_remark + debug.emit_param_as_remark(c1337) + transform.yield_([]) + # CHECK-NEXT: }, { + with ir.InsertionPoint( + 
left_or_right_as_before.alternatives[idx_for_right].blocks[0] + ): + # CHECK: transform.param.constant 42 + i32_42 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 42) + c42 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_42) + # CHECK: transform.debug.emit_param_as_remark + debug.emit_param_as_remark(c42) + transform.yield_([]) + # CHECK-NEXT: } diff --git a/mlir/test/python/ir/operation.py b/mlir/test/python/ir/operation.py index 4a3625c..cb4cfc8c 100644 --- a/mlir/test/python/ir/operation.py +++ b/mlir/test/python/ir/operation.py @@ -696,6 +696,7 @@ def testOperationPrint(): # CHECK: resource1: "0x08 module.operation.print(large_elements_limit=2) + # CHECK-LABEL: TEST: testKnownOpView @run def testKnownOpView(): @@ -969,6 +970,13 @@ def testOperationLoc(): assert op.location == loc assert op.operation.location == loc + another_loc = Location.name("another_loc") + op.location = another_loc + assert op.location == another_loc + assert op.operation.location == another_loc + # CHECK: loc("another_loc") + print(op.location) + # CHECK-LABEL: TEST: testModuleMerge @run diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel index e57d9de..026664b 100644 --- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel @@ -5336,6 +5336,7 @@ libc_support_library( ":__support_common", ":__support_cpp_bitset", ":__support_cpp_type_traits", + ":__support_macros_attributes", ":__support_macros_optimization", ":hdr_limits_macros", ":llvm_libc_types_size_t", |