author     Valentin Clement (バレンタイン クレメン) <clementval@gmail.com>   2024-09-16 09:50:00 -0700
committer  Vitaly Buka <vitalybuka@google.com>   2024-09-16 09:50:00 -0700
commit     6ed00639c6d594e54cc76405009b856693b1dbcc (patch)
tree       73dc21d3293dce7955df13c46f71245d6ae7d3cd
parent     15794f9b24fdeaea96aa6291da3d3028332fee8f (diff)
parent     bc54e5636f2080e6a35ec201d5963a2c455fe5f5 (diff)
[spr] changes introduced through rebase (users/vitalybuka/spr/main.nfcsanitizer-switch-to-gnu_get_libc_version)
Created using spr 1.3.4
[skip ci]
667 files changed, 30803 insertions, 13133 deletions
diff --git a/clang-tools-extra/clang-tidy/cert/FloatLoopCounter.cpp b/clang-tools-extra/clang-tidy/cert/FloatLoopCounter.cpp index a936383..46acc9f 100644 --- a/clang-tools-extra/clang-tidy/cert/FloatLoopCounter.cpp +++ b/clang-tools-extra/clang-tidy/cert/FloatLoopCounter.cpp @@ -9,6 +9,7 @@ #include "FloatLoopCounter.h" #include "clang/AST/ASTContext.h" #include "clang/ASTMatchers/ASTMatchFinder.h" +#include "clang/ASTMatchers/ASTMatchers.h" using namespace clang::ast_matchers; @@ -16,15 +17,30 @@ namespace clang::tidy::cert { void FloatLoopCounter::registerMatchers(MatchFinder *Finder) { Finder->addMatcher( - forStmt(hasIncrement(expr(hasType(realFloatingPointType())))).bind("for"), + forStmt(hasIncrement(forEachDescendant( + declRefExpr(hasType(realFloatingPointType()), + to(varDecl().bind("var"))) + .bind("inc"))), + hasCondition(forEachDescendant( + declRefExpr(hasType(realFloatingPointType()), + to(varDecl(equalsBoundNode("var")))) + .bind("cond")))) + .bind("for"), this); } void FloatLoopCounter::check(const MatchFinder::MatchResult &Result) { const auto *FS = Result.Nodes.getNodeAs<ForStmt>("for"); - diag(FS->getInc()->getExprLoc(), "loop induction expression should not have " - "floating-point type"); + diag(FS->getInc()->getBeginLoc(), "loop induction expression should not have " + "floating-point type") + << Result.Nodes.getNodeAs<DeclRefExpr>("inc")->getSourceRange() + << Result.Nodes.getNodeAs<DeclRefExpr>("cond")->getSourceRange(); + + if (!FS->getInc()->getType()->isRealFloatingType()) + if (const auto *V = Result.Nodes.getNodeAs<VarDecl>("var")) + diag(V->getBeginLoc(), "floating-point type loop induction variable", + DiagnosticIDs::Note); } } // namespace clang::tidy::cert diff --git a/clang-tools-extra/clang-tidy/cppcoreguidelines/PreferMemberInitializerCheck.cpp b/clang-tools-extra/clang-tidy/cppcoreguidelines/PreferMemberInitializerCheck.cpp index e516b71..593a4f8 100644 --- a/clang-tools-extra/clang-tidy/cppcoreguidelines/PreferMemberInitializerCheck.cpp +++ b/clang-tools-extra/clang-tidy/cppcoreguidelines/PreferMemberInitializerCheck.cpp @@ -83,7 +83,7 @@ static void updateAssignmentLevel( memberExpr(hasObjectExpression(cxxThisExpr()), member(fieldDecl(indexNotLessThan(Field->getFieldIndex())))); auto DeclMatcher = declRefExpr( - to(varDecl(unless(parmVarDecl()), hasDeclContext(equalsNode(Ctor))))); + to(valueDecl(unless(parmVarDecl()), hasDeclContext(equalsNode(Ctor))))); const bool HasDependence = !match(expr(anyOf(MemberMatcher, DeclMatcher, hasDescendant(MemberMatcher), hasDescendant(DeclMatcher))), diff --git a/clang-tools-extra/clang-tidy/readability/AvoidUnconditionalPreprocessorIfCheck.cpp b/clang-tools-extra/clang-tidy/readability/AvoidUnconditionalPreprocessorIfCheck.cpp index d92d0e8..ca5fc35 100644 --- a/clang-tools-extra/clang-tidy/readability/AvoidUnconditionalPreprocessorIfCheck.cpp +++ b/clang-tools-extra/clang-tidy/readability/AvoidUnconditionalPreprocessorIfCheck.cpp @@ -84,7 +84,8 @@ struct AvoidUnconditionalPreprocessorIfPPCallbacks : public PPCallbacks { return (Tok.getRawIdentifier() == "true" || Tok.getRawIdentifier() == "false"); default: - return Tok.getKind() >= tok::l_square && Tok.getKind() <= tok::caretcaret; + return Tok.getKind() >= tok::l_square && + Tok.getKind() <= tok::greatergreatergreater; } } diff --git a/clang-tools-extra/clangd/Diagnostics.cpp b/clang-tools-extra/clangd/Diagnostics.cpp index d5eca08..552dd36 100644 --- a/clang-tools-extra/clangd/Diagnostics.cpp +++ b/clang-tools-extra/clangd/Diagnostics.cpp @@ -579,7 +579,17 @@ 
std::vector<Diag> StoreDiags::take(const clang::tidy::ClangTidyContext *Tidy) { for (auto &Diag : Output) { if (const char *ClangDiag = getDiagnosticCode(Diag.ID)) { // Warnings controlled by -Wfoo are better recognized by that name. - StringRef Warning = DiagnosticIDs::getWarningOptionForDiag(Diag.ID); + const StringRef Warning = [&] { + if (OrigSrcMgr) { + return OrigSrcMgr->getDiagnostics() + .getDiagnosticIDs() + ->getWarningOptionForDiag(Diag.ID); + } + if (!DiagnosticIDs::IsCustomDiag(Diag.ID)) + return DiagnosticIDs{}.getWarningOptionForDiag(Diag.ID); + return StringRef{}; + }(); + if (!Warning.empty()) { Diag.Name = ("-W" + Warning).str(); } else { @@ -896,20 +906,23 @@ void StoreDiags::flushLastDiag() { Output.push_back(std::move(*LastDiag)); } -bool isBuiltinDiagnosticSuppressed(unsigned ID, - const llvm::StringSet<> &Suppress, - const LangOptions &LangOpts) { +bool isDiagnosticSuppressed(const clang::Diagnostic &Diag, + const llvm::StringSet<> &Suppress, + const LangOptions &LangOpts) { // Don't complain about header-only stuff in mainfiles if it's a header. // FIXME: would be cleaner to suppress in clang, once we decide whether the // behavior should be to silently-ignore or respect the pragma. - if (ID == diag::pp_pragma_sysheader_in_main_file && LangOpts.IsHeaderFile) + if (Diag.getID() == diag::pp_pragma_sysheader_in_main_file && + LangOpts.IsHeaderFile) return true; - if (const char *CodePtr = getDiagnosticCode(ID)) { + if (const char *CodePtr = getDiagnosticCode(Diag.getID())) { if (Suppress.contains(normalizeSuppressedCode(CodePtr))) return true; } - StringRef Warning = DiagnosticIDs::getWarningOptionForDiag(ID); + StringRef Warning = + Diag.getDiags()->getDiagnosticIDs()->getWarningOptionForDiag( + Diag.getID()); if (!Warning.empty() && Suppress.contains(Warning)) return true; return false; diff --git a/clang-tools-extra/clangd/Diagnostics.h b/clang-tools-extra/clangd/Diagnostics.h index d4c0478..c45d8dc 100644 --- a/clang-tools-extra/clangd/Diagnostics.h +++ b/clang-tools-extra/clangd/Diagnostics.h @@ -181,11 +181,11 @@ private: }; /// Determine whether a (non-clang-tidy) diagnostic is suppressed by config. -bool isBuiltinDiagnosticSuppressed(unsigned ID, - const llvm::StringSet<> &Suppressed, - const LangOptions &); +bool isDiagnosticSuppressed(const clang::Diagnostic &Diag, + const llvm::StringSet<> &Suppressed, + const LangOptions &); /// Take a user-specified diagnostic code, and convert it to a normalized form -/// stored in the config and consumed by isBuiltinDiagnosticsSuppressed. +/// stored in the config and consumed by isDiagnosticSuppressed. /// /// (This strips err_ and -W prefix so we can match with or without them.)
llvm::StringRef normalizeSuppressedCode(llvm::StringRef); diff --git a/clang-tools-extra/clangd/ParsedAST.cpp b/clang-tools-extra/clangd/ParsedAST.cpp index a8b6cc8..4491be9 100644 --- a/clang-tools-extra/clangd/ParsedAST.cpp +++ b/clang-tools-extra/clangd/ParsedAST.cpp @@ -340,7 +340,7 @@ void applyWarningOptions(llvm::ArrayRef<std::string> ExtraArgs, if (Enable) { if (Diags.getDiagnosticLevel(ID, SourceLocation()) < DiagnosticsEngine::Warning) { - auto Group = DiagnosticIDs::getGroupForDiag(ID); + auto Group = Diags.getDiagnosticIDs()->getGroupForDiag(ID); if (!Group || !EnabledGroups(*Group)) continue; Diags.setSeverity(ID, diag::Severity::Warning, SourceLocation()); @@ -583,8 +583,8 @@ ParsedAST::build(llvm::StringRef Filename, const ParseInputs &Inputs, ASTDiags.setLevelAdjuster([&](DiagnosticsEngine::Level DiagLevel, const clang::Diagnostic &Info) { if (Cfg.Diagnostics.SuppressAll || - isBuiltinDiagnosticSuppressed(Info.getID(), Cfg.Diagnostics.Suppress, - Clang->getLangOpts())) + isDiagnosticSuppressed(Info, Cfg.Diagnostics.Suppress, + Clang->getLangOpts())) return DiagnosticsEngine::Ignored; auto It = OverriddenSeverity.find(Info.getID()); diff --git a/clang-tools-extra/clangd/Preamble.cpp b/clang-tools-extra/clangd/Preamble.cpp index dd13b1a..84e8fec 100644 --- a/clang-tools-extra/clangd/Preamble.cpp +++ b/clang-tools-extra/clangd/Preamble.cpp @@ -621,8 +621,8 @@ buildPreamble(PathRef FileName, CompilerInvocation CI, PreambleDiagnostics.setLevelAdjuster([&](DiagnosticsEngine::Level DiagLevel, const clang::Diagnostic &Info) { if (Cfg.Diagnostics.SuppressAll || - isBuiltinDiagnosticSuppressed(Info.getID(), Cfg.Diagnostics.Suppress, - CI.getLangOpts())) + isDiagnosticSuppressed(Info, Cfg.Diagnostics.Suppress, + CI.getLangOpts())) return DiagnosticsEngine::Ignored; switch (Info.getID()) { case diag::warn_no_newline_eof: diff --git a/clang-tools-extra/clangd/unittests/ConfigCompileTests.cpp b/clang-tools-extra/clangd/unittests/ConfigCompileTests.cpp index 4ecfdf0..021d731 100644 --- a/clang-tools-extra/clangd/unittests/ConfigCompileTests.cpp +++ b/clang-tools-extra/clangd/unittests/ConfigCompileTests.cpp @@ -298,20 +298,41 @@ TEST_F(ConfigCompileTests, DiagnosticSuppression) { "unreachable-code", "unused-variable", "typecheck_bool_condition", "unexpected_friend", "warn_alloca")); - EXPECT_TRUE(isBuiltinDiagnosticSuppressed( - diag::warn_unreachable, Conf.Diagnostics.Suppress, LangOptions())); + clang::DiagnosticsEngine DiagEngine(new DiagnosticIDs, nullptr, + new clang::IgnoringDiagConsumer); + + using Diag = clang::Diagnostic; + { + auto D = DiagEngine.Report(diag::warn_unreachable); + EXPECT_TRUE(isDiagnosticSuppressed( + Diag{&DiagEngine}, Conf.Diagnostics.Suppress, LangOptions())); + } // Subcategory not respected/suppressed. 
- EXPECT_FALSE(isBuiltinDiagnosticSuppressed( - diag::warn_unreachable_break, Conf.Diagnostics.Suppress, LangOptions())); - EXPECT_TRUE(isBuiltinDiagnosticSuppressed( - diag::warn_unused_variable, Conf.Diagnostics.Suppress, LangOptions())); - EXPECT_TRUE(isBuiltinDiagnosticSuppressed(diag::err_typecheck_bool_condition, - Conf.Diagnostics.Suppress, - LangOptions())); - EXPECT_TRUE(isBuiltinDiagnosticSuppressed( - diag::err_unexpected_friend, Conf.Diagnostics.Suppress, LangOptions())); - EXPECT_TRUE(isBuiltinDiagnosticSuppressed( - diag::warn_alloca, Conf.Diagnostics.Suppress, LangOptions())); + { + auto D = DiagEngine.Report(diag::warn_unreachable_break); + EXPECT_FALSE(isDiagnosticSuppressed( + Diag{&DiagEngine}, Conf.Diagnostics.Suppress, LangOptions())); + } + { + auto D = DiagEngine.Report(diag::warn_unused_variable); + EXPECT_TRUE(isDiagnosticSuppressed( + Diag{&DiagEngine}, Conf.Diagnostics.Suppress, LangOptions())); + } + { + auto D = DiagEngine.Report(diag::err_typecheck_bool_condition); + EXPECT_TRUE(isDiagnosticSuppressed( + Diag{&DiagEngine}, Conf.Diagnostics.Suppress, LangOptions())); + } + { + auto D = DiagEngine.Report(diag::err_unexpected_friend); + EXPECT_TRUE(isDiagnosticSuppressed( + Diag{&DiagEngine}, Conf.Diagnostics.Suppress, LangOptions())); + } + { + auto D = DiagEngine.Report(diag::warn_alloca); + EXPECT_TRUE(isDiagnosticSuppressed( + Diag{&DiagEngine}, Conf.Diagnostics.Suppress, LangOptions())); + } Frag.Diagnostics.Suppress.emplace_back("*"); EXPECT_TRUE(compileAndApply()); diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst index 88b9283..8d0c093 100644 --- a/clang-tools-extra/docs/ReleaseNotes.rst +++ b/clang-tools-extra/docs/ReleaseNotes.rst @@ -111,6 +111,15 @@ Changes in existing checks <clang-tidy/checks/bugprone/casting-through-void>` check to suggest replacing the offending code with ``reinterpret_cast``, to more clearly express intent. +- Improved :doc:`cert-flp30-c<clang-tidy/checks/cert/flp30-c>` check to + fix a false positive where the floating-point variable is only used in the + increment expression. + +- Improved :doc:`cppcoreguidelines-prefer-member-initializer + <clang-tidy/checks/cppcoreguidelines/prefer-member-initializer>` check to avoid + a false positive when member initialization depends on a structured binding + variable. + - Improved :doc:`modernize-use-std-format <clang-tidy/checks/modernize/use-std-format>` check to support replacing member function calls too.
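For illustration only (not part of the patch): per the cert-flp30-c release note above and the updated flp30-c.c test further below, a minimal C++ sketch of the two loop shapes the improved check now distinguishes; the variable names are hypothetical.

    // Hypothetical example mirroring the patterns exercised by flp30-c.c.
    float budget = 1.0f;
    void loops(void) {
      // Still flagged: the float induction variable drives both the
      // condition and the increment.
      for (float x = 0.1f; x <= 1.0f; x += 0.1f) {}
      // No longer flagged (the old false positive): a float is modified in
      // the increment but does not control the loop.
      for (int i = 0; i < 10; ++i, budget++) {}
    }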
diff --git a/clang-tools-extra/pseudo/lib/cxx/cxx.bnf b/clang-tools-extra/pseudo/lib/cxx/cxx.bnf index 36caf7b..fbd964d 100644 --- a/clang-tools-extra/pseudo/lib/cxx/cxx.bnf +++ b/clang-tools-extra/pseudo/lib/cxx/cxx.bnf @@ -639,7 +639,6 @@ operator-name := > operator-name := <= operator-name := >= operator-name := <=> -operator-name := ^^ operator-name := || operator-name := << operator-name := greatergreater diff --git a/clang-tools-extra/test/clang-tidy/checkers/cert/flp30-c.c b/clang-tools-extra/test/clang-tidy/checkers/cert/flp30-c.c index eee16be..b998588 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/cert/flp30-c.c +++ b/clang-tools-extra/test/clang-tidy/checkers/cert/flp30-c.c @@ -1,19 +1,28 @@ // RUN: %check_clang_tidy %s cert-flp30-c %t float g(void); +int c(float); +float f = 1.0f; + +void match(void) { -void func(void) { for (float x = 0.1f; x <= 1.0f; x += 0.1f) {} - // CHECK-MESSAGES: :[[@LINE-1]]:37: warning: loop induction expression should not have floating-point type [cert-flp30-c] + // CHECK-MESSAGES: :[[@LINE-1]]:35: warning: loop induction expression should not have floating-point type [cert-flp30-c] - float f = 1.0f; for (; f > 0; --f) {} - // CHECK-MESSAGES: :[[@LINE-1]]:17: warning: loop induction expression + // CHECK-MESSAGES: :[[@LINE-1]]:17: warning: loop induction expression should not have floating-point type [cert-flp30-c] - for (;;g()) {} - // CHECK-MESSAGES: :[[@LINE-1]]:10: warning: loop induction expression + for (float x = 0.0f; c(x); x = g()) {} + // CHECK-MESSAGES: :[[@LINE-1]]:30: warning: loop induction expression should not have floating-point type [cert-flp30-c] - for (int i = 0; i < 10; i += 1.0f) {} + for (int i=0; i < 10 && f < 2.0f; f++, i++) {} + // CHECK-MESSAGES: :[[@LINE-1]]:37: warning: loop induction expression should not have floating-point type [cert-flp30-c] + // CHECK-MESSAGES: :5:1: note: floating-point type loop induction variable +} +void not_match(void) { + for (int i = 0; i < 10; i += 1.0f) {} for (int i = 0; i < 10; ++i) {} + for (int i = 0; i < 10; ++i, f++) {} + for (int i = 0; f < 10.f; ++i) {} } diff --git a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/prefer-member-initializer.cpp b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/prefer-member-initializer.cpp index e784a35..fa4307d 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/prefer-member-initializer.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/prefer-member-initializer.cpp @@ -639,3 +639,14 @@ struct S3 { T M; }; } + +namespace GH82970 { +struct InitFromBingingDecl { + int m; + InitFromBingingDecl() { + struct { int i; } a; + auto [n] = a; + m = n; + } +}; +} // namespace GH82970 diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index 3929a9f..69b2aea 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -303,6 +303,10 @@ Improvements to Clang's diagnostics - Clang now warns for u8 character literals used in C23 with ``-Wpre-c23-compat`` instead of ``-Wpre-c++17-compat``. +- Clang now diagnoses when importing module implementation partition units in module interface units. + +- Don't emit bogus dangling diagnostics when ``[[gsl::Owner]]`` and ``[[clang::lifetimebound]]`` are used together (#GH108272). + Improvements to Clang's time-trace ---------------------------------- @@ -386,7 +390,7 @@ Bug Fixes to C++ Support - Fixed a crash in the typo correction of an invalid CTAD guide.
(#GH107887) - Fixed a crash when clang tries to subtitute parameter pack while retaining the parameter pack. #GH63819, #GH107560 - +- Fix a crash when a static assert declaration has an invalid close location. (#GH108687) Bug Fixes to AST Handling ^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -499,7 +503,7 @@ AST Matchers - Fixed an issue with the `hasName` and `hasAnyName` matcher when matching inline namespaces with an enclosing namespace of the same name. -- Fixed an ordering issue with the `hasOperands` matcher occuring when setting a +- Fixed an ordering issue with the `hasOperands` matcher occurring when setting a binding in the first matcher and using it in the second matcher. clang-format diff --git a/clang/docs/UsersManual.rst b/clang/docs/UsersManual.rst index f27fa4a..57d78f8 100644 --- a/clang/docs/UsersManual.rst +++ b/clang/docs/UsersManual.rst @@ -2410,6 +2410,39 @@ are listed below. link-time optimizations like whole program inter-procedural basic block reordering. +.. option:: -fcodegen-data-generate[=<path>] + + Emit the raw codegen (CG) data into custom sections in the object file. + Currently, this option also combines the raw CG data from the object files + into an indexed CG data file specified by the <path>, for LLD MachO only. + When the <path> is not specified, `default.cgdata` is created. + The CG data file combines all the outlining instances that occurred locally + in each object file. + + .. code-block:: console + + $ clang -fuse-ld=lld -Oz -fcodegen-data-generate code.cc + + For linkers that do not yet support this feature, `llvm-cgdata` can be used + manually to merge this CG data in object files. + + .. code-block:: console + + $ clang -c -fuse-ld=lld -Oz -fcodegen-data-generate code.cc + $ llvm-cgdata --merge -o default.cgdata code.o + +.. option:: -fcodegen-data-use[=<path>] + + Read the codegen data from the specified path to more effectively outline + functions across compilation units. When the <path> is not specified, + `default.cgdata` is used. This option can create many identically outlined + functions that can be optimized by the conventional linker’s identical code + folding (ICF). + + .. code-block:: console + + $ clang -fuse-ld=lld -Oz -Wl,--icf=safe -fcodegen-data-use code.cc + Profile Guided Optimization --------------------------- diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h index 3389670..cd9947f 100644 --- a/clang/include/clang/AST/RecursiveASTVisitor.h +++ b/clang/include/clang/AST/RecursiveASTVisitor.h @@ -652,9 +652,11 @@ bool RecursiveASTVisitor<Derived>::PostVisitStmt(Stmt *S) { #undef DISPATCH_STMT +// Inlining this method can lead to large code size and compile-time increases +// without any benefit to runtime performance. 
template <typename Derived> -bool RecursiveASTVisitor<Derived>::TraverseStmt(Stmt *S, - DataRecursionQueue *Queue) { +LLVM_ATTRIBUTE_NOINLINE bool +RecursiveASTVisitor<Derived>::TraverseStmt(Stmt *S, DataRecursionQueue *Queue) { if (!S) return true; diff --git a/clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h b/clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h index 117173b..b7b8485 100644 --- a/clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h +++ b/clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h @@ -118,14 +118,10 @@ public: static FunctionParmMutationAnalyzer * getFunctionParmMutationAnalyzer(const FunctionDecl &Func, ASTContext &Context, ExprMutationAnalyzer::Memoized &Memorized) { - auto it = Memorized.FuncParmAnalyzer.find(&Func); - if (it == Memorized.FuncParmAnalyzer.end()) - it = - Memorized.FuncParmAnalyzer - .try_emplace(&Func, std::unique_ptr<FunctionParmMutationAnalyzer>( - new FunctionParmMutationAnalyzer( - Func, Context, Memorized))) - .first; + auto [it, Inserted] = Memorized.FuncParmAnalyzer.try_emplace(&Func); + if (Inserted) + it->second = std::unique_ptr<FunctionParmMutationAnalyzer>( + new FunctionParmMutationAnalyzer(Func, Context, Memorized)); return it->getSecond().get(); } diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td index 9a7b163..70fad60 100644 --- a/clang/include/clang/Basic/Attr.td +++ b/clang/include/clang/Basic/Attr.td @@ -3358,18 +3358,16 @@ def DiagnoseIf : InheritableAttr { let Spellings = [GNU<"diagnose_if">]; let Subjects = SubjectList<[Function, ObjCMethod, ObjCProperty]>; let Args = [ExprArgument<"Cond">, StringArgument<"Message">, - EnumArgument<"DiagnosticType", "DiagnosticType", + EnumArgument<"DefaultSeverity", + "DefaultSeverity", /*is_string=*/true, - ["error", "warning"], - ["DT_Error", "DT_Warning"]>, + ["error", "warning"], + ["DS_error", "DS_warning"]>, + StringArgument<"WarningGroup", /*optional*/ 1>, BoolArgument<"ArgDependent", 0, /*fake*/ 1>, DeclArgument<Named, "Parent", 0, /*fake*/ 1>]; let InheritEvenIfAlreadyPresent = 1; let LateParsed = LateAttrParseStandard; - let AdditionalMembers = [{ - bool isError() const { return diagnosticType == DT_Error; } - bool isWarning() const { return diagnosticType == DT_Warning; } - }]; let TemplateDependent = 1; let Documentation = [DiagnoseIfDocs]; } diff --git a/clang/include/clang/Basic/Diagnostic.h b/clang/include/clang/Basic/Diagnostic.h index 0c7836c..54b69e9 100644 --- a/clang/include/clang/Basic/Diagnostic.h +++ b/clang/include/clang/Basic/Diagnostic.h @@ -336,10 +336,12 @@ private: // Map extensions to warnings or errors? diag::Severity ExtBehavior = diag::Severity::Ignored; - DiagState() + DiagnosticIDs &DiagIDs; + + DiagState(DiagnosticIDs &DiagIDs) : IgnoreAllWarnings(false), EnableAllWarnings(false), WarningsAsErrors(false), ErrorsAsFatal(false), - SuppressSystemWarnings(false) {} + SuppressSystemWarnings(false), DiagIDs(DiagIDs) {} using iterator = llvm::DenseMap<unsigned, DiagnosticMapping>::iterator; using const_iterator = @@ -870,6 +872,8 @@ public: /// \param FormatString A fixed diagnostic format string that will be hashed /// and mapped to a unique DiagID. 
template <unsigned N> + // TODO: Deprecate this once all uses are removed from LLVM + // [[deprecated("Use a CustomDiagDesc instead of a Level")]] unsigned getCustomDiagID(Level L, const char (&FormatString)[N]) { return Diags->getCustomDiagID((DiagnosticIDs::Level)L, StringRef(FormatString, N - 1)); diff --git a/clang/include/clang/Basic/DiagnosticCategories.h b/clang/include/clang/Basic/DiagnosticCategories.h index 14be326..839f8de 100644 --- a/clang/include/clang/Basic/DiagnosticCategories.h +++ b/clang/include/clang/Basic/DiagnosticCategories.h @@ -21,11 +21,12 @@ namespace clang { }; enum class Group { -#define DIAG_ENTRY(GroupName, FlagNameOffset, Members, SubGroups, Docs) \ - GroupName, +#define DIAG_ENTRY(GroupName, FlagNameOffset, Members, SubGroups, Docs) \ + GroupName, #include "clang/Basic/DiagnosticGroups.inc" #undef CATEGORY #undef DIAG_ENTRY + NUM_GROUPS }; } // end namespace diag } // end namespace clang diff --git a/clang/include/clang/Basic/DiagnosticIDs.h b/clang/include/clang/Basic/DiagnosticIDs.h index 8b976bd..daad66f 100644 --- a/clang/include/clang/Basic/DiagnosticIDs.h +++ b/clang/include/clang/Basic/DiagnosticIDs.h @@ -14,9 +14,11 @@ #ifndef LLVM_CLANG_BASIC_DIAGNOSTICIDS_H #define LLVM_CLANG_BASIC_DIAGNOSTICIDS_H +#include "clang/Basic/DiagnosticCategories.h" #include "clang/Basic/LLVM.h" #include "llvm/ADT/IntrusiveRefCntPtr.h" #include "llvm/ADT/StringRef.h" +#include "llvm/Support/ErrorHandling.h" #include <optional> #include <vector> @@ -82,7 +84,7 @@ namespace clang { /// to either Ignore (nothing), Remark (emit a remark), Warning /// (emit a warning) or Error (emit as an error). It allows clients to /// map ERRORs to Error or Fatal (stop emitting diagnostics after this one). - enum class Severity { + enum class Severity : uint8_t { // NOTE: 0 means "uncomputed". Ignored = 1, ///< Do not present this diagnostic, ignore it. Remark = 2, ///< Present this diagnostic as a remark. @@ -179,13 +181,96 @@ public: class DiagnosticIDs : public RefCountedBase<DiagnosticIDs> { public: /// The level of the diagnostic, after it has been through mapping. - enum Level { - Ignored, Note, Remark, Warning, Error, Fatal + enum Level : uint8_t { Ignored, Note, Remark, Warning, Error, Fatal }; + + // Diagnostic classes. 
+ enum Class { + CLASS_INVALID = 0x00, + CLASS_NOTE = 0x01, + CLASS_REMARK = 0x02, + CLASS_WARNING = 0x03, + CLASS_EXTENSION = 0x04, + CLASS_ERROR = 0x05 + }; + + static bool IsCustomDiag(diag::kind Diag) { + return Diag >= diag::DIAG_UPPER_LIMIT; + } + + class CustomDiagDesc { + LLVM_PREFERRED_TYPE(diag::Severity) + unsigned DefaultSeverity : 3; + LLVM_PREFERRED_TYPE(Class) + unsigned DiagClass : 3; + LLVM_PREFERRED_TYPE(bool) + unsigned ShowInSystemHeader : 1; + LLVM_PREFERRED_TYPE(bool) + unsigned ShowInSystemMacro : 1; + LLVM_PREFERRED_TYPE(bool) + unsigned HasGroup : 1; + diag::Group Group; + std::string Description; + + auto get_as_tuple() const { + return std::tuple(DefaultSeverity, DiagClass, ShowInSystemHeader, + ShowInSystemMacro, HasGroup, Group, + std::string_view{Description}); + } + + public: + CustomDiagDesc(diag::Severity DefaultSeverity, std::string Description, + unsigned Class = CLASS_WARNING, + bool ShowInSystemHeader = false, + bool ShowInSystemMacro = false, + std::optional<diag::Group> Group = std::nullopt) + : DefaultSeverity(static_cast<unsigned>(DefaultSeverity)), + DiagClass(Class), ShowInSystemHeader(ShowInSystemHeader), + ShowInSystemMacro(ShowInSystemMacro), HasGroup(Group != std::nullopt), + Group(Group.value_or(diag::Group{})), + Description(std::move(Description)) {} + + std::optional<diag::Group> GetGroup() const { + if (HasGroup) + return Group; + return std::nullopt; + } + + diag::Severity GetDefaultSeverity() const { + return static_cast<diag::Severity>(DefaultSeverity); + } + + Class GetClass() const { return static_cast<Class>(DiagClass); } + std::string_view GetDescription() const { return Description; } + bool ShouldShowInSystemHeader() const { return ShowInSystemHeader; } + + friend bool operator==(const CustomDiagDesc &lhs, + const CustomDiagDesc &rhs) { + return lhs.get_as_tuple() == rhs.get_as_tuple(); + } + + friend bool operator<(const CustomDiagDesc &lhs, + const CustomDiagDesc &rhs) { + return lhs.get_as_tuple() < rhs.get_as_tuple(); + } + }; + + struct GroupInfo { + LLVM_PREFERRED_TYPE(diag::Severity) + unsigned Severity : 3; + LLVM_PREFERRED_TYPE(bool) + unsigned HasNoWarningAsError : 1; }; private: /// Information for uniquing and looking up custom diags. std::unique_ptr<diag::CustomDiagInfo> CustomDiagInfo; + std::unique_ptr<GroupInfo[]> GroupInfos = []() { + auto GIs = std::make_unique<GroupInfo[]>( + static_cast<size_t>(diag::Group::NUM_GROUPS)); + for (size_t i = 0; i != static_cast<size_t>(diag::Group::NUM_GROUPS); ++i) + GIs[i] = {{}, false}; + return GIs; + }(); public: DiagnosticIDs(); @@ -200,7 +285,35 @@ public: // FIXME: Replace this function with a create-only facilty like // createCustomDiagIDFromFormatString() to enforce safe usage. At the time of // writing, nearly all callers of this function were invalid. 
- unsigned getCustomDiagID(Level L, StringRef FormatString); + unsigned getCustomDiagID(CustomDiagDesc Diag); + + // TODO: Deprecate this once all uses are removed from LLVM + // [[deprecated("Use a CustomDiagDesc instead of a Level")]] + unsigned getCustomDiagID(Level Level, StringRef Message) { + return getCustomDiagID([&]() -> CustomDiagDesc { + switch (Level) { + case DiagnosticIDs::Level::Ignored: + return {diag::Severity::Ignored, std::string(Message), CLASS_WARNING, + /*ShowInSystemHeader*/ true}; + case DiagnosticIDs::Level::Note: + return {diag::Severity::Fatal, std::string(Message), CLASS_NOTE, + /*ShowInSystemHeader*/ true}; + case DiagnosticIDs::Level::Remark: + return {diag::Severity::Remark, std::string(Message), CLASS_REMARK, + /*ShowInSystemHeader*/ true}; + case DiagnosticIDs::Level::Warning: + return {diag::Severity::Warning, std::string(Message), CLASS_WARNING, + /*ShowInSystemHeader*/ true}; + case DiagnosticIDs::Level::Error: + return {diag::Severity::Error, std::string(Message), CLASS_ERROR, + /*ShowInSystemHeader*/ true}; + case DiagnosticIDs::Level::Fatal: + return {diag::Severity::Fatal, std::string(Message), CLASS_ERROR, + /*ShowInSystemHeader*/ true}; + } + llvm_unreachable("Fully covered switch above!"); + }()); + } //===--------------------------------------------------------------------===// // Diagnostic classification and reporting interfaces. @@ -212,35 +325,36 @@ public: /// Return true if the unmapped diagnostic levelof the specified /// diagnostic ID is a Warning or Extension. /// - /// This only works on builtin diagnostics, not custom ones, and is not - /// legal to call on NOTEs. - static bool isBuiltinWarningOrExtension(unsigned DiagID); + /// This is not legal to call on NOTEs. + bool isWarningOrExtension(unsigned DiagID) const; /// Return true if the specified diagnostic is mapped to errors by /// default. - static bool isDefaultMappingAsError(unsigned DiagID); + bool isDefaultMappingAsError(unsigned DiagID) const; /// Get the default mapping for this diagnostic. - static DiagnosticMapping getDefaultMapping(unsigned DiagID); + DiagnosticMapping getDefaultMapping(unsigned DiagID) const; + + void initCustomDiagMapping(DiagnosticMapping &, unsigned DiagID); - /// Determine whether the given built-in diagnostic ID is a Note. - static bool isBuiltinNote(unsigned DiagID); + /// Determine whether the given diagnostic ID is a Note. + bool isNote(unsigned DiagID) const; - /// Determine whether the given built-in diagnostic ID is for an + /// Determine whether the given diagnostic ID is for an /// extension of some sort. - static bool isBuiltinExtensionDiag(unsigned DiagID) { + bool isExtensionDiag(unsigned DiagID) const { bool ignored; - return isBuiltinExtensionDiag(DiagID, ignored); + return isExtensionDiag(DiagID, ignored); } - /// Determine whether the given built-in diagnostic ID is for an + /// Determine whether the given diagnostic ID is for an /// extension of some sort, and whether it is enabled by default. /// /// This also returns EnabledByDefault, which is set to indicate whether the /// diagnostic is ignored by default (in which case -pedantic enables it) or /// treated as a warning/error by default. /// - static bool isBuiltinExtensionDiag(unsigned DiagID, bool &EnabledByDefault); + bool isExtensionDiag(unsigned DiagID, bool &EnabledByDefault) const; /// Given a group ID, returns the flag that toggles the group. 
/// For example, for Group::DeprecatedDeclarations, returns @@ -250,19 +364,22 @@ public: /// Given a diagnostic group ID, return its documentation. static StringRef getWarningOptionDocumentation(diag::Group GroupID); + void setGroupSeverity(StringRef Group, diag::Severity); + void setGroupNoWarningsAsError(StringRef Group, bool); + /// Given a group ID, returns the flag that toggles the group. /// For example, for "deprecated-declarations", returns /// Group::DeprecatedDeclarations. static std::optional<diag::Group> getGroupForWarningOption(StringRef); /// Return the lowest-level group that contains the specified diagnostic. - static std::optional<diag::Group> getGroupForDiag(unsigned DiagID); + std::optional<diag::Group> getGroupForDiag(unsigned DiagID) const; /// Return the lowest-level warning option that enables the specified /// diagnostic. /// /// If there is no -Wfoo flag that controls the diagnostic, this returns null. - static StringRef getWarningOptionForDiag(unsigned DiagID); + StringRef getWarningOptionForDiag(unsigned DiagID); /// Return the category number that a specified \p DiagID belongs to, /// or 0 if no category. @@ -363,6 +480,8 @@ private: getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc, const DiagnosticsEngine &Diag) const LLVM_READONLY; + Class getDiagClass(unsigned DiagID) const; + /// Used to report a diagnostic that is finally fully formed. /// /// \returns \c true if the diagnostic was emitted, \c false if it was diff --git a/clang/include/clang/Basic/DiagnosticLexKinds.td b/clang/include/clang/Basic/DiagnosticLexKinds.td index fc14bb6..8893702 100644 --- a/clang/include/clang/Basic/DiagnosticLexKinds.td +++ b/clang/include/clang/Basic/DiagnosticLexKinds.td @@ -508,6 +508,8 @@ def note_macro_expansion_here : Note<"expansion of macro %0 requested here">; def ext_pp_opencl_variadic_macros : Extension< "variadic macros are a Clang extension in OpenCL">; +def err_opencl_logical_exclusive_or : Error< + "^^ is a reserved operator in OpenCL">; def ext_pp_gnu_line_directive : Extension< "this style of line directive is a GNU extension">, diff --git a/clang/include/clang/Basic/DiagnosticParseKinds.td b/clang/include/clang/Basic/DiagnosticParseKinds.td index fec2456d..1afadb3 100644 --- a/clang/include/clang/Basic/DiagnosticParseKinds.td +++ b/clang/include/clang/Basic/DiagnosticParseKinds.td @@ -1397,8 +1397,6 @@ def err_modifier_expected_colon : Error<"missing ':' after %0 modifier">; // OpenCL errors. def err_opencl_taking_function_address_parser : Error< "taking address of function is not allowed">; -def err_opencl_logical_exclusive_or : Error< - "^^ is a reserved operator in OpenCL">; // C++ for OpenCL. def err_openclcxx_virtual_function : Error< diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td index 50f27ee..d42558d 100644 --- a/clang/include/clang/Basic/DiagnosticSemaKinds.td +++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td @@ -442,6 +442,10 @@ def warn_deprecated_literal_operator_id: Warning< "is deprecated">, InGroup<DeprecatedLiteralOperator>, DefaultIgnore; def warn_reserved_module_name : Warning< "%0 is a reserved name for a module">, InGroup<ReservedModuleIdentifier>; +def warn_import_implementation_partition_unit_in_interface_unit : Warning< + "importing an implementation partition unit in a module interface is not recommended. 
" + "Names from %0 may not be reachable">, + InGroup<DiagGroup<"import-implementation-partition-unit-in-interface-unit">>; def warn_parameter_size: Warning< "%0 is a large (%1 bytes) pass-by-value argument; " @@ -2929,9 +2933,15 @@ def ext_constexpr_function_never_constant_expr : ExtWarn< "constant expression">, InGroup<DiagGroup<"invalid-constexpr">>, DefaultError; def err_attr_cond_never_constant_expr : Error< "%0 attribute expression never produces a constant expression">; +def err_diagnose_if_unknown_warning : Error<"unknown warning group '%0'">; def err_diagnose_if_invalid_diagnostic_type : Error< "invalid diagnostic type for 'diagnose_if'; use \"error\" or \"warning\" " "instead">; +def err_diagnose_if_unknown_option : Error<"unknown diagnostic option">; +def err_diagnose_if_expected_equals : Error< + "expected '=' after diagnostic option">; +def err_diagnose_if_unexpected_value : Error< + "unexpected value; use 'true' or 'false'">; def err_constexpr_body_no_return : Error< "no return statement in %select{constexpr|consteval}0 function">; def err_constexpr_return_missing_expr : Error< diff --git a/clang/include/clang/Basic/TokenKinds.def b/clang/include/clang/Basic/TokenKinds.def index a82ff68..00e150d 100644 --- a/clang/include/clang/Basic/TokenKinds.def +++ b/clang/include/clang/Basic/TokenKinds.def @@ -255,9 +255,6 @@ PUNCTUATOR(at, "@") PUNCTUATOR(lesslessless, "<<<") PUNCTUATOR(greatergreatergreater, ">>>") -// CL support -PUNCTUATOR(caretcaret, "^^") - // C99 6.4.1: Keywords. These turn into kw_* tokens. // Flags allowed: // KEYALL - This is a keyword in all variants of C and C++, or it diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index dc8bfc6..7f12333 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -1894,6 +1894,18 @@ def fprofile_selected_function_group : Visibility<[ClangOption, CC1Option]>, MetaVarName<"<i>">, HelpText<"Partition functions into N groups using -fprofile-function-groups and select only functions in group i to be instrumented. The valid range is 0 to N-1 inclusive">, MarshallingInfoInt<CodeGenOpts<"ProfileSelectedFunctionGroup">>; +def fcodegen_data_generate_EQ : Joined<["-"], "fcodegen-data-generate=">, + Group<f_Group>, Visibility<[ClangOption, CLOption]>, MetaVarName<"<path>">, + HelpText<"Emit codegen data into the object file. LLD for MachO (currently) merges them into the specified <path>.">; +def fcodegen_data_generate : Flag<["-"], "fcodegen-data-generate">, + Group<f_Group>, Visibility<[ClangOption, CLOption]>, Alias<fcodegen_data_generate_EQ>, AliasArgs<["default.cgdata"]>, + HelpText<"Emit codegen data into the object file. 
LLD for MachO (currently) merges them into default.cgdata.">; +def fcodegen_data_use_EQ : Joined<["-"], "fcodegen-data-use=">, + Group<f_Group>, Visibility<[ClangOption, CLOption]>, MetaVarName<"<path>">, + HelpText<"Use codegen data read from the specified <path>.">; +def fcodegen_data_use : Flag<["-"], "fcodegen-data-use">, + Group<f_Group>, Visibility<[ClangOption, CLOption]>, Alias<fcodegen_data_use_EQ>, AliasArgs<["default.cgdata"]>, + HelpText<"Use codegen data read from default.cgdata to optimize the binary">; def fswift_async_fp_EQ : Joined<["-"], "fswift-async-fp=">, Group<f_Group>, Visibility<[ClangOption, CC1Option, CC1AsOption, CLOption]>, diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h index 50d5d25..1a9bef0 100644 --- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h +++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h @@ -36,7 +36,7 @@ DefinedOrUnknownSVal getDynamicElementCount(ProgramStateRef State, /// Set the dynamic extent \p Extent of the region \p MR. ProgramStateRef setDynamicExtent(ProgramStateRef State, const MemRegion *MR, - DefinedOrUnknownSVal Extent, SValBuilder &SVB); + DefinedOrUnknownSVal Extent); /// Get the dynamic extent for a symbolic value that represents a buffer. If /// there is an offsetting to the underlying buffer we consider that too. diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h index a560f27..6eedaf0 100644 --- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h +++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h @@ -215,17 +215,17 @@ public: /// Conjure a symbol representing heap allocated memory region. /// /// Note, the expression should represent a location. - DefinedOrUnknownSVal getConjuredHeapSymbolVal(const Expr *E, - const LocationContext *LCtx, - unsigned Count); + DefinedSVal getConjuredHeapSymbolVal(const Expr *E, + const LocationContext *LCtx, + unsigned Count); /// Conjure a symbol representing heap allocated memory region. /// /// Note, now, the expression *doesn't* need to represent a location. /// But the type need to! - DefinedOrUnknownSVal getConjuredHeapSymbolVal(const Expr *E, - const LocationContext *LCtx, - QualType type, unsigned Count); + DefinedSVal getConjuredHeapSymbolVal(const Expr *E, + const LocationContext *LCtx, + QualType type, unsigned Count); /// Create an SVal representing the result of an alloca()-like call, that is, /// an AllocaRegion on the stack. 
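As context for the DiagnosticIDs.h changes earlier in this diff, a hedged sketch of registering a custom diagnostic through the new CustomDiagDesc interface instead of the deprecated (Level, format string) overload. The function, the already-configured DiagnosticsEngine, and the SourceLocation are assumptions for illustration, not names from this patch.

    // Sketch only: assumes the patched clang/Basic/DiagnosticIDs.h.
    #include "clang/Basic/Diagnostic.h"
    #include "clang/Basic/DiagnosticIDs.h"
    #include <optional>

    void reportExample(clang::DiagnosticsEngine &Engine,
                       clang::SourceLocation Loc) {
      using clang::DiagnosticIDs;
      // Describe the diagnostic explicitly instead of passing a Level.
      DiagnosticIDs::CustomDiagDesc Desc(
          clang::diag::Severity::Warning, "example custom warning",
          DiagnosticIDs::CLASS_WARNING,
          /*ShowInSystemHeader=*/false,
          /*ShowInSystemMacro=*/false,
          /*Group=*/std::nullopt);
      unsigned ID = Engine.getDiagnosticIDs()->getCustomDiagID(Desc);
      Engine.Report(Loc, ID);
    }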
diff --git a/clang/lib/AST/APValue.cpp b/clang/lib/AST/APValue.cpp index d8e33ff..4f5d14c 100644 --- a/clang/lib/AST/APValue.cpp +++ b/clang/lib/AST/APValue.cpp @@ -947,7 +947,6 @@ std::string APValue::getAsString(const ASTContext &Ctx, QualType Ty) const { std::string Result; llvm::raw_string_ostream Out(Result); printPretty(Out, Ctx, Ty); - Out.flush(); return Result; } diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp index 5247b8c..7e0775a 100644 --- a/clang/lib/AST/ByteCode/Compiler.cpp +++ b/clang/lib/AST/ByteCode/Compiler.cpp @@ -2638,18 +2638,46 @@ bool Compiler<Emitter>::VisitCXXReinterpretCastExpr( const CXXReinterpretCastExpr *E) { const Expr *SubExpr = E->getSubExpr(); - bool Fatal = false; std::optional<PrimType> FromT = classify(SubExpr); std::optional<PrimType> ToT = classify(E); + if (!FromT || !ToT) - Fatal = true; - else - Fatal = (ToT != FromT); + return this->emitInvalidCast(CastKind::Reinterpret, /*Fatal=*/true, E); + + if (FromT == PT_Ptr || ToT == PT_Ptr) { + // Both types could be PT_Ptr because their expressions are glvalues. + std::optional<PrimType> PointeeFromT; + if (SubExpr->getType()->isPointerOrReferenceType()) + PointeeFromT = classify(SubExpr->getType()->getPointeeType()); + else + PointeeFromT = classify(SubExpr->getType()); + + std::optional<PrimType> PointeeToT; + if (E->getType()->isPointerOrReferenceType()) + PointeeToT = classify(E->getType()->getPointeeType()); + else + PointeeToT = classify(E->getType()); + + bool Fatal = true; + if (PointeeToT && PointeeFromT) { + if (isIntegralType(*PointeeFromT) && isIntegralType(*PointeeToT)) + Fatal = false; + } + + if (!this->emitInvalidCast(CastKind::Reinterpret, Fatal, E)) + return false; + + if (E->getCastKind() == CK_LValueBitCast) + return this->delegate(SubExpr); + return this->VisitCastExpr(E); + } + // Try to actually do the cast. 
+ bool Fatal = (ToT != FromT); if (!this->emitInvalidCast(CastKind::Reinterpret, Fatal, E)) return false; - return this->delegate(SubExpr); + return this->VisitCastExpr(E); } template <class Emitter> diff --git a/clang/lib/AST/DeclPrinter.cpp b/clang/lib/AST/DeclPrinter.cpp index 07be813..0d51fdb 100644 --- a/clang/lib/AST/DeclPrinter.cpp +++ b/clang/lib/AST/DeclPrinter.cpp @@ -629,7 +629,6 @@ static void printExplicitSpecifier(ExplicitSpecifier ES, llvm::raw_ostream &Out, EOut << ")"; } EOut << " "; - EOut.flush(); Out << Proto; } @@ -790,7 +789,6 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { llvm::raw_string_ostream EOut(Proto); FT->getNoexceptExpr()->printPretty(EOut, nullptr, SubPolicy, Indentation, "\n", &Context); - EOut.flush(); Proto += ")"; } } diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp index e10142e..2e463fc 100644 --- a/clang/lib/AST/Expr.cpp +++ b/clang/lib/AST/Expr.cpp @@ -612,7 +612,7 @@ std::string SYCLUniqueStableNameExpr::ComputeName(ASTContext &Context, llvm::raw_string_ostream Out(Buffer); Ctx->mangleCanonicalTypeName(Ty, Out); - return Out.str(); + return Buffer; } PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, @@ -798,7 +798,6 @@ std::string PredefinedExpr::ComputeName(PredefinedIdentKind IK, FD->printQualifiedName(POut, Policy); if (IK == PredefinedIdentKind::Function) { - POut.flush(); Out << Proto; return std::string(Name); } @@ -880,15 +879,12 @@ std::string PredefinedExpr::ComputeName(PredefinedIdentKind IK, } } - TOut.flush(); if (!TemplateParams.empty()) { // remove the trailing comma and space TemplateParams.resize(TemplateParams.size() - 2); POut << " [" << TemplateParams << "]"; } - POut.flush(); - // Print "auto" for all deduced return types. This includes C++1y return // type deduction and lambdas. For trailing return types resolve the // decltype expression. 
Otherwise print the real type when this is diff --git a/clang/lib/AST/Mangle.cpp b/clang/lib/AST/Mangle.cpp index 75f6e21..4875e85 100644 --- a/clang/lib/AST/Mangle.cpp +++ b/clang/lib/AST/Mangle.cpp @@ -574,9 +574,9 @@ private: std::string BackendBuf; llvm::raw_string_ostream BOS(BackendBuf); - llvm::Mangler::getNameWithPrefix(BOS, FOS.str(), DL); + llvm::Mangler::getNameWithPrefix(BOS, FrontendBuf, DL); - return BOS.str(); + return BackendBuf; } std::string getMangledThunk(const CXXMethodDecl *MD, const ThunkInfo &T, @@ -589,9 +589,9 @@ private: std::string BackendBuf; llvm::raw_string_ostream BOS(BackendBuf); - llvm::Mangler::getNameWithPrefix(BOS, FOS.str(), DL); + llvm::Mangler::getNameWithPrefix(BOS, FrontendBuf, DL); - return BOS.str(); + return BackendBuf; } }; diff --git a/clang/lib/AST/MicrosoftMangle.cpp b/clang/lib/AST/MicrosoftMangle.cpp index 018ab61..7b069c6 100644 --- a/clang/lib/AST/MicrosoftMangle.cpp +++ b/clang/lib/AST/MicrosoftMangle.cpp @@ -1396,7 +1396,7 @@ void MicrosoftCXXNameMangler::mangleNestedName(GlobalDecl GD) { Stream << '_' << Discriminator; if (ParameterDiscriminator) Stream << '_' << ParameterDiscriminator; - return Stream.str(); + return Buffer; }; unsigned Discriminator = BD->getBlockManglingNumber(); diff --git a/clang/lib/AST/StmtViz.cpp b/clang/lib/AST/StmtViz.cpp index 4eb0da8..c863630 100644 --- a/clang/lib/AST/StmtViz.cpp +++ b/clang/lib/AST/StmtViz.cpp @@ -34,15 +34,14 @@ struct DOTGraphTraits<const Stmt*> : public DefaultDOTGraphTraits { static std::string getNodeLabel(const Stmt* Node, const Stmt* Graph) { #ifndef NDEBUG - std::string OutSStr; - llvm::raw_string_ostream Out(OutSStr); + std::string OutStr; + llvm::raw_string_ostream Out(OutStr); if (Node) Out << Node->getStmtClassName(); else Out << "<NULL>"; - std::string OutStr = Out.str(); if (OutStr[0] == '\n') OutStr.erase(OutStr.begin()); // Process string output to make it nicer... 
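Several hunks in this diff (APValue.cpp, DeclPrinter.cpp, Expr.cpp, Mangle.cpp, and MicrosoftMangle.cpp above, CFG.cpp and Registry.cpp below) drop raw_string_ostream::flush()/str() calls in favor of returning the backing string directly. A minimal sketch of the idiom they converge on (my example, not from the patch): raw_string_ostream in current LLVM is unbuffered and writes through to its target string, so the buffer is already complete without a flush.

    #include "llvm/Support/raw_ostream.h"
    #include <string>

    static std::string renderLabel(int N) {
      std::string Buf;
      llvm::raw_string_ostream OS(Buf);
      OS << "node-" << N; // writes through to Buf immediately
      return Buf;         // previously written as `return OS.str();`
    }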
diff --git a/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/clang/lib/ASTMatchers/Dynamic/Registry.cpp index 2c75e6b..8d36ad5 100644 --- a/clang/lib/ASTMatchers/Dynamic/Registry.cpp +++ b/clang/lib/ASTMatchers/Dynamic/Registry.cpp @@ -791,7 +791,7 @@ Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) { TypedText += "\""; } - Completions.emplace_back(TypedText, OS.str(), MaxSpecificity); + Completions.emplace_back(TypedText, Decl, MaxSpecificity); } } diff --git a/clang/lib/Analysis/CFG.cpp b/clang/lib/Analysis/CFG.cpp index 64e6155..f678ac6 100644 --- a/clang/lib/Analysis/CFG.cpp +++ b/clang/lib/Analysis/CFG.cpp @@ -6164,7 +6164,7 @@ void CFGBlock::printTerminatorJson(raw_ostream &Out, const LangOptions &LO, printTerminator(TempOut, LO); - Out << JsonFormat(TempOut.str(), AddQuotes); + Out << JsonFormat(Buf, AddQuotes); } // Returns true if by simply looking at the block, we can be sure that it @@ -6345,10 +6345,9 @@ struct DOTGraphTraits<const CFG*> : public DefaultDOTGraphTraits { DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} static std::string getNodeLabel(const CFGBlock *Node, const CFG *Graph) { - std::string OutSStr; - llvm::raw_string_ostream Out(OutSStr); + std::string OutStr; + llvm::raw_string_ostream Out(OutStr); print_block(Out,Graph, *Node, *GraphHelper, false, false); - std::string& OutStr = Out.str(); if (OutStr[0] == '\n') OutStr.erase(OutStr.begin()); diff --git a/clang/lib/Basic/Diagnostic.cpp b/clang/lib/Basic/Diagnostic.cpp index 66776da..ecff80a 100644 --- a/clang/lib/Basic/Diagnostic.cpp +++ b/clang/lib/Basic/Diagnostic.cpp @@ -138,7 +138,7 @@ void DiagnosticsEngine::Reset(bool soft /*=false*/) { // Create a DiagState and DiagStatePoint representing diagnostic changes // through command-line. - DiagStates.emplace_back(); + DiagStates.emplace_back(*Diags); DiagStatesByLoc.appendFirst(&DiagStates.back()); } } @@ -166,8 +166,11 @@ DiagnosticsEngine::DiagState::getOrAddMapping(diag::kind Diag) { DiagMap.insert(std::make_pair(Diag, DiagnosticMapping())); // Initialize the entry if we added it. - if (Result.second) - Result.first->second = DiagnosticIDs::getDefaultMapping(Diag); + if (Result.second) { + Result.first->second = DiagIDs.getDefaultMapping(Diag); + if (DiagnosticIDs::IsCustomDiag(Diag)) + DiagIDs.initCustomDiagMapping(Result.first->second, Diag); + } return Result.first->second; } @@ -309,7 +312,8 @@ void DiagnosticsEngine::DiagStateMap::dump(SourceManager &SrcMgr, for (auto &Mapping : *Transition.State) { StringRef Option = - DiagnosticIDs::getWarningOptionForDiag(Mapping.first); + SrcMgr.getDiagnostics().Diags->getWarningOptionForDiag( + Mapping.first); if (!DiagName.empty() && DiagName != Option) continue; @@ -353,9 +357,7 @@ void DiagnosticsEngine::PushDiagStatePoint(DiagState *State, void DiagnosticsEngine::setSeverity(diag::kind Diag, diag::Severity Map, SourceLocation L) { - assert(Diag < diag::DIAG_UPPER_LIMIT && - "Can only map builtin diagnostics"); - assert((Diags->isBuiltinWarningOrExtension(Diag) || + assert((Diags->isWarningOrExtension(Diag) || (Map == diag::Severity::Fatal || Map == diag::Severity::Error)) && "Cannot map errors into warnings!"); assert((L.isInvalid() || SourceMgr) && "No SourceMgr for valid location"); @@ -407,6 +409,8 @@ bool DiagnosticsEngine::setSeverityForGroup(diag::Flavor Flavor, if (Diags->getDiagnosticsInGroup(Flavor, Group, GroupDiags)) return true; + Diags->setGroupSeverity(Group, Map); + // Set the mapping. 
for (diag::kind Diag : GroupDiags) setSeverity(Diag, Map, Loc); @@ -429,6 +433,7 @@ bool DiagnosticsEngine::setDiagnosticGroupWarningAsError(StringRef Group, if (Enabled) return setSeverityForGroup(diag::Flavor::WarningOrError, Group, diag::Severity::Error); + Diags->setGroupSeverity(Group, diag::Severity::Warning); // Otherwise, we want to set the diagnostic mapping's "no Werror" bit, and // potentially downgrade anything already mapped to be a warning. @@ -460,6 +465,7 @@ bool DiagnosticsEngine::setDiagnosticGroupErrorAsFatal(StringRef Group, if (Enabled) return setSeverityForGroup(diag::Flavor::WarningOrError, Group, diag::Severity::Fatal); + Diags->setGroupSeverity(Group, diag::Severity::Error); // Otherwise, we want to set the diagnostic mapping's "no Wfatal-errors" bit, // and potentially downgrade anything already mapped to be a fatal error. @@ -492,7 +498,7 @@ void DiagnosticsEngine::setSeverityForAll(diag::Flavor Flavor, // Set the mapping. for (diag::kind Diag : AllDiags) - if (Diags->isBuiltinWarningOrExtension(Diag)) + if (Diags->isWarningOrExtension(Diag)) setSeverity(Diag, Map, Loc); } diff --git a/clang/lib/Basic/DiagnosticIDs.cpp b/clang/lib/Basic/DiagnosticIDs.cpp index cd42573..cae6642 100644 --- a/clang/lib/Basic/DiagnosticIDs.cpp +++ b/clang/lib/Basic/DiagnosticIDs.cpp @@ -102,13 +102,12 @@ const uint32_t StaticDiagInfoDescriptionOffsets[] = { #undef DIAG }; -// Diagnostic classes. enum DiagnosticClass { - CLASS_NOTE = 0x01, - CLASS_REMARK = 0x02, - CLASS_WARNING = 0x03, - CLASS_EXTENSION = 0x04, - CLASS_ERROR = 0x05 + CLASS_NOTE = DiagnosticIDs::CLASS_NOTE, + CLASS_REMARK = DiagnosticIDs::CLASS_REMARK, + CLASS_WARNING = DiagnosticIDs::CLASS_WARNING, + CLASS_EXTENSION = DiagnosticIDs::CLASS_EXTENSION, + CLASS_ERROR = DiagnosticIDs::CLASS_ERROR, }; struct StaticDiagInfoRec { @@ -269,11 +268,60 @@ CATEGORY(INSTALLAPI, REFACTORING) return Found; } -DiagnosticMapping DiagnosticIDs::getDefaultMapping(unsigned DiagID) { +//===----------------------------------------------------------------------===// +// Custom Diagnostic information +//===----------------------------------------------------------------------===// + +namespace clang { +namespace diag { +using CustomDiagDesc = DiagnosticIDs::CustomDiagDesc; +class CustomDiagInfo { + std::vector<CustomDiagDesc> DiagInfo; + std::map<CustomDiagDesc, unsigned> DiagIDs; + std::map<diag::Group, std::vector<unsigned>> GroupToDiags; + +public: + /// getDescription - Return the description of the specified custom + /// diagnostic. + const CustomDiagDesc &getDescription(unsigned DiagID) const { + assert(DiagID - DIAG_UPPER_LIMIT < DiagInfo.size() && + "Invalid diagnostic ID"); + return DiagInfo[DiagID - DIAG_UPPER_LIMIT]; + } + + unsigned getOrCreateDiagID(DiagnosticIDs::CustomDiagDesc D) { + // Check to see if it already exists. + std::map<CustomDiagDesc, unsigned>::iterator I = DiagIDs.lower_bound(D); + if (I != DiagIDs.end() && I->first == D) + return I->second; + + // If not, assign a new ID. 
+ unsigned ID = DiagInfo.size() + DIAG_UPPER_LIMIT; + DiagIDs.insert(std::make_pair(D, ID)); + DiagInfo.push_back(D); + if (auto Group = D.GetGroup()) + GroupToDiags[*Group].emplace_back(ID); + return ID; + } + + ArrayRef<unsigned> getDiagsInGroup(diag::Group G) const { + if (auto Diags = GroupToDiags.find(G); Diags != GroupToDiags.end()) + return Diags->second; + return {}; + } +}; + +} // namespace diag +} // namespace clang + +DiagnosticMapping DiagnosticIDs::getDefaultMapping(unsigned DiagID) const { DiagnosticMapping Info = DiagnosticMapping::Make( diag::Severity::Fatal, /*IsUser=*/false, /*IsPragma=*/false); - if (const StaticDiagInfoRec *StaticInfo = GetDiagInfo(DiagID)) { + if (IsCustomDiag(DiagID)) { + Info.setSeverity( + CustomDiagInfo->getDescription(DiagID).GetDefaultSeverity()); + } else if (const StaticDiagInfoRec *StaticInfo = GetDiagInfo(DiagID)) { Info.setSeverity((diag::Severity)StaticInfo->DefaultSeverity); if (StaticInfo->WarnNoWerror) { @@ -286,6 +334,18 @@ DiagnosticMapping DiagnosticIDs::getDefaultMapping(unsigned DiagID) { return Info; } +void DiagnosticIDs::initCustomDiagMapping(DiagnosticMapping &Mapping, + unsigned DiagID) { + assert(IsCustomDiag(DiagID)); + const auto &Diag = CustomDiagInfo->getDescription(DiagID); + if (auto Group = Diag.GetGroup()) { + GroupInfo GroupInfo = GroupInfos[static_cast<size_t>(*Group)]; + if (static_cast<diag::Severity>(GroupInfo.Severity) != diag::Severity()) + Mapping.setSeverity(static_cast<diag::Severity>(GroupInfo.Severity)); + Mapping.setNoWarningAsError(GroupInfo.HasNoWarningAsError); + } +} + /// getCategoryNumberForDiag - Return the category number that a specified /// DiagID belongs to, or 0 if no category. unsigned DiagnosticIDs::getCategoryNumberForDiag(unsigned DiagID) { @@ -343,61 +403,6 @@ bool DiagnosticIDs::isDeferrable(unsigned DiagID) { return false; } -/// getBuiltinDiagClass - Return the class field of the diagnostic. -/// -static unsigned getBuiltinDiagClass(unsigned DiagID) { - if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID)) - return Info->Class; - return ~0U; -} - -//===----------------------------------------------------------------------===// -// Custom Diagnostic information -//===----------------------------------------------------------------------===// - -namespace clang { - namespace diag { - class CustomDiagInfo { - typedef std::pair<DiagnosticIDs::Level, std::string> DiagDesc; - std::vector<DiagDesc> DiagInfo; - std::map<DiagDesc, unsigned> DiagIDs; - public: - - /// getDescription - Return the description of the specified custom - /// diagnostic. - StringRef getDescription(unsigned DiagID) const { - assert(DiagID - DIAG_UPPER_LIMIT < DiagInfo.size() && - "Invalid diagnostic ID"); - return DiagInfo[DiagID-DIAG_UPPER_LIMIT].second; - } - - /// getLevel - Return the level of the specified custom diagnostic. - DiagnosticIDs::Level getLevel(unsigned DiagID) const { - assert(DiagID - DIAG_UPPER_LIMIT < DiagInfo.size() && - "Invalid diagnostic ID"); - return DiagInfo[DiagID-DIAG_UPPER_LIMIT].first; - } - - unsigned getOrCreateDiagID(DiagnosticIDs::Level L, StringRef Message, - DiagnosticIDs &Diags) { - DiagDesc D(L, std::string(Message)); - // Check to see if it already exists. - std::map<DiagDesc, unsigned>::iterator I = DiagIDs.lower_bound(D); - if (I != DiagIDs.end() && I->first == D) - return I->second; - - // If not, assign a new ID. 
- unsigned ID = DiagInfo.size()+DIAG_UPPER_LIMIT; - DiagIDs.insert(std::make_pair(D, ID)); - DiagInfo.push_back(D); - return ID; - } - }; - - } // end diag namespace -} // end clang namespace - - //===----------------------------------------------------------------------===// // Common Diagnostic implementation //===----------------------------------------------------------------------===// @@ -412,38 +417,32 @@ DiagnosticIDs::~DiagnosticIDs() {} /// /// \param FormatString A fixed diagnostic format string that will be hashed and /// mapped to a unique DiagID. -unsigned DiagnosticIDs::getCustomDiagID(Level L, StringRef FormatString) { +unsigned DiagnosticIDs::getCustomDiagID(CustomDiagDesc Diag) { if (!CustomDiagInfo) CustomDiagInfo.reset(new diag::CustomDiagInfo()); - return CustomDiagInfo->getOrCreateDiagID(L, FormatString, *this); + return CustomDiagInfo->getOrCreateDiagID(Diag); } - -/// isBuiltinWarningOrExtension - Return true if the unmapped diagnostic -/// level of the specified diagnostic ID is a Warning or Extension. -/// This only works on builtin diagnostics, not custom ones, and is not legal to -/// call on NOTEs. -bool DiagnosticIDs::isBuiltinWarningOrExtension(unsigned DiagID) { - return DiagID < diag::DIAG_UPPER_LIMIT && - getBuiltinDiagClass(DiagID) != CLASS_ERROR; +bool DiagnosticIDs::isWarningOrExtension(unsigned DiagID) const { + return DiagID < diag::DIAG_UPPER_LIMIT + ? getDiagClass(DiagID) != CLASS_ERROR + : CustomDiagInfo->getDescription(DiagID).GetClass() != CLASS_ERROR; } /// Determine whether the given built-in diagnostic ID is a /// Note. -bool DiagnosticIDs::isBuiltinNote(unsigned DiagID) { - return DiagID < diag::DIAG_UPPER_LIMIT && - getBuiltinDiagClass(DiagID) == CLASS_NOTE; +bool DiagnosticIDs::isNote(unsigned DiagID) const { + return DiagID < diag::DIAG_UPPER_LIMIT && getDiagClass(DiagID) == CLASS_NOTE; } -/// isBuiltinExtensionDiag - Determine whether the given built-in diagnostic +/// isExtensionDiag - Determine whether the given built-in diagnostic /// ID is for an extension of some sort. This also returns EnabledByDefault, /// which is set to indicate whether the diagnostic is ignored by default (in /// which case -pedantic enables it) or treated as a warning/error by default. 
/// -bool DiagnosticIDs::isBuiltinExtensionDiag(unsigned DiagID, - bool &EnabledByDefault) { - if (DiagID >= diag::DIAG_UPPER_LIMIT || - getBuiltinDiagClass(DiagID) != CLASS_EXTENSION) +bool DiagnosticIDs::isExtensionDiag(unsigned DiagID, + bool &EnabledByDefault) const { + if (IsCustomDiag(DiagID) || getDiagClass(DiagID) != CLASS_EXTENSION) return false; EnabledByDefault = @@ -451,10 +450,7 @@ bool DiagnosticIDs::isBuiltinExtensionDiag(unsigned DiagID, return true; } -bool DiagnosticIDs::isDefaultMappingAsError(unsigned DiagID) { - if (DiagID >= diag::DIAG_UPPER_LIMIT) - return false; - +bool DiagnosticIDs::isDefaultMappingAsError(unsigned DiagID) const { return getDefaultMapping(DiagID).getSeverity() >= diag::Severity::Error; } @@ -464,7 +460,7 @@ StringRef DiagnosticIDs::getDescription(unsigned DiagID) const { if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID)) return Info->getDescription(); assert(CustomDiagInfo && "Invalid CustomDiagInfo"); - return CustomDiagInfo->getDescription(DiagID); + return CustomDiagInfo->getDescription(DiagID).GetDescription(); } static DiagnosticIDs::Level toLevel(diag::Severity SV) { @@ -489,13 +485,7 @@ static DiagnosticIDs::Level toLevel(diag::Severity SV) { DiagnosticIDs::Level DiagnosticIDs::getDiagnosticLevel(unsigned DiagID, SourceLocation Loc, const DiagnosticsEngine &Diag) const { - // Handle custom diagnostics, which cannot be mapped. - if (DiagID >= diag::DIAG_UPPER_LIMIT) { - assert(CustomDiagInfo && "Invalid CustomDiagInfo"); - return CustomDiagInfo->getLevel(DiagID); - } - - unsigned DiagClass = getBuiltinDiagClass(DiagID); + unsigned DiagClass = getDiagClass(DiagID); if (DiagClass == CLASS_NOTE) return DiagnosticIDs::Note; return toLevel(getDiagnosticSeverity(DiagID, Loc, Diag)); } @@ -509,7 +499,8 @@ DiagnosticIDs::getDiagnosticLevel(unsigned DiagID, SourceLocation Loc, diag::Severity DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc, const DiagnosticsEngine &Diag) const { - assert(getBuiltinDiagClass(DiagID) != CLASS_NOTE); + bool IsCustomDiag = DiagnosticIDs::IsCustomDiag(DiagID); + assert(getDiagClass(DiagID) != CLASS_NOTE); // Specific non-error diagnostics may be mapped to various levels from ignored // to error. Errors can only be mapped to fatal. @@ -517,7 +508,7 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc, // Get the mapping information, or compute it lazily. DiagnosticsEngine::DiagState *State = Diag.GetDiagStateForLoc(Loc); - DiagnosticMapping &Mapping = State->getOrAddMapping((diag::kind)DiagID); + DiagnosticMapping Mapping = State->getOrAddMapping((diag::kind)DiagID); // TODO: Can a null severity really get here? if (Mapping.getSeverity() != diag::Severity()) @@ -525,14 +516,15 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc, // Upgrade ignored diagnostics if -Weverything is enabled. if (State->EnableAllWarnings && Result == diag::Severity::Ignored && - !Mapping.isUser() && getBuiltinDiagClass(DiagID) != CLASS_REMARK) + !Mapping.isUser() && + (IsCustomDiag || getDiagClass(DiagID) != CLASS_REMARK)) Result = diag::Severity::Warning; // Ignore -pedantic diagnostics inside __extension__ blocks. // (The diagnostics controlled by -pedantic are the extension diagnostics // that are not enabled by default.) 
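For reference, the Severity-to-Level projection that getDiagnosticLevel funnels through can be written out directly. This sketch mirrors the enumerator names used in these hunks; notes never reach it, since their class is tested first:

enum class Severity { Ignored, Remark, Warning, Error, Fatal };
enum class Level { Ignored, Note, Remark, Warning, Error, Fatal };

Level toLevel(Severity SV) {
  switch (SV) {
  case Severity::Ignored: return Level::Ignored;
  case Severity::Remark:  return Level::Remark;
  case Severity::Warning: return Level::Warning;
  case Severity::Error:   return Level::Error;
  case Severity::Fatal:   return Level::Fatal;
  }
  return Level::Ignored; // unreachable with a well-formed Severity
}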
bool EnabledByDefault = false; - bool IsExtensionDiag = isBuiltinExtensionDiag(DiagID, EnabledByDefault); + bool IsExtensionDiag = isExtensionDiag(DiagID, EnabledByDefault); if (Diag.AllExtensionsSilenced && IsExtensionDiag && !EnabledByDefault) return diag::Severity::Ignored; @@ -550,10 +542,12 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc, // as well as disabling all messages which are currently mapped to Warning // (whether by default or downgraded from Error via e.g. -Wno-error or #pragma // diagnostic.) + // FIXME: Should -w be ignored for custom warnings without a group? if (State->IgnoreAllWarnings) { - if (Result == diag::Severity::Warning || - (Result >= diag::Severity::Error && - !isDefaultMappingAsError((diag::kind)DiagID))) + if ((!IsCustomDiag || CustomDiagInfo->getDescription(DiagID).GetGroup()) && + (Result == diag::Severity::Warning || + (Result >= diag::Severity::Error && + !isDefaultMappingAsError((diag::kind)DiagID)))) return diag::Severity::Ignored; } @@ -575,9 +569,10 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc, Diag.CurDiagID != diag::fatal_too_many_errors && Diag.FatalsAsError) Result = diag::Severity::Error; - // Custom diagnostics always are emitted in system headers. bool ShowInSystemHeader = - !GetDiagInfo(DiagID) || GetDiagInfo(DiagID)->WarnShowInSystemHeader; + IsCustomDiag + ? CustomDiagInfo->getDescription(DiagID).ShouldShowInSystemHeader() + : !GetDiagInfo(DiagID) || GetDiagInfo(DiagID)->WarnShowInSystemHeader; // If we are in a system header, we ignore it. We look at the diagnostic class // because we also want to ignore extensions and warnings in -Werror and @@ -597,6 +592,15 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc, return Result; } +DiagnosticIDs::Class DiagnosticIDs::getDiagClass(unsigned DiagID) const { + if (IsCustomDiag(DiagID)) + return Class(CustomDiagInfo->getDescription(DiagID).GetClass()); + + if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID)) + return Class(Info->Class); + return CLASS_INVALID; +} + #define GET_DIAG_ARRAYS #include "clang/Basic/DiagnosticGroups.inc" #undef GET_DIAG_ARRAYS @@ -642,7 +646,12 @@ DiagnosticIDs::getGroupForWarningOption(StringRef Name) { return static_cast<diag::Group>(Found - OptionTable); } -std::optional<diag::Group> DiagnosticIDs::getGroupForDiag(unsigned DiagID) { +std::optional<diag::Group> +DiagnosticIDs::getGroupForDiag(unsigned DiagID) const { + if (IsCustomDiag(DiagID)) { + assert(CustomDiagInfo); + return CustomDiagInfo->getDescription(DiagID).GetGroup(); + } if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID)) return static_cast<diag::Group>(Info->getOptionGroupIndex()); return std::nullopt; @@ -673,7 +682,8 @@ std::vector<std::string> DiagnosticIDs::getDiagnosticFlags() { /// were filtered out due to having the wrong flavor. static bool getDiagnosticsInGroup(diag::Flavor Flavor, const WarningOption *Group, - SmallVectorImpl<diag::kind> &Diags) { + SmallVectorImpl<diag::kind> &Diags, + diag::CustomDiagInfo *CustomDiagInfo) { // An empty group is considered to be a warning group: we have empty groups // for GCC compatibility, and GCC does not have remarks. if (!Group->Members && !Group->SubGroups) @@ -692,9 +702,14 @@ static bool getDiagnosticsInGroup(diag::Flavor Flavor, // Add the members of the subgroups. 
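The ordering that getDiagnosticSeverity applies is easy to lose in the diff: explicit mappings win, -Weverything then upgrades ignored diagnostics, and -w finally suppresses warnings plus anything only upgraded to error. A simplified model with the engine state flattened into booleans (the parameters are illustrative, not the real DiagState API):

#include <optional>

enum class Severity { Ignored, Remark, Warning, Error, Fatal };

Severity resolveSeverity(std::optional<Severity> ExplicitMapping,
                         Severity Default, bool EnableAllWarnings,
                         bool IgnoreAllWarnings, bool DefaultIsError,
                         bool IsRemarkClass) {
  Severity Result = ExplicitMapping.value_or(Default);
  // -Weverything upgrades diagnostics that would otherwise be ignored.
  if (EnableAllWarnings && Result == Severity::Ignored && !IsRemarkClass)
    Result = Severity::Warning;
  // -w silences warnings, and errors that were merely upgraded to error.
  if (IgnoreAllWarnings &&
      (Result == Severity::Warning ||
       (Result >= Severity::Error && !DefaultIsError)))
    Result = Severity::Ignored;
  return Result;
}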
  const int16_t *SubGroups = DiagSubGroups + Group->SubGroups;
-  for (; *SubGroups != (int16_t)-1; ++SubGroups)
+  for (; *SubGroups != (int16_t)-1; ++SubGroups) {
+    if (CustomDiagInfo)
+      llvm::copy(
+          CustomDiagInfo->getDiagsInGroup(static_cast<diag::Group>(*SubGroups)),
+          std::back_inserter(Diags));
     NotFound &= getDiagnosticsInGroup(Flavor, &OptionTable[(short)*SubGroups],
-                                      Diags);
+                                      Diags, CustomDiagInfo);
+  }
 
   return NotFound;
 }
 
@@ -702,12 +717,49 @@ static bool getDiagnosticsInGroup(diag::Flavor Flavor,
 bool
 DiagnosticIDs::getDiagnosticsInGroup(diag::Flavor Flavor, StringRef Group,
                                      SmallVectorImpl<diag::kind> &Diags) const {
-  if (std::optional<diag::Group> G = getGroupForWarningOption(Group))
-    return ::getDiagnosticsInGroup(
-        Flavor, &OptionTable[static_cast<unsigned>(*G)], Diags);
+  if (std::optional<diag::Group> G = getGroupForWarningOption(Group)) {
+    if (CustomDiagInfo)
+      llvm::copy(CustomDiagInfo->getDiagsInGroup(*G),
+                 std::back_inserter(Diags));
+    return ::getDiagnosticsInGroup(Flavor,
+                                   &OptionTable[static_cast<unsigned>(*G)],
+                                   Diags, CustomDiagInfo.get());
+  }
   return true;
 }
 
+template <class Func>
+static void forEachSubGroupImpl(const WarningOption *Group, Func func) {
+  for (const int16_t *SubGroups = DiagSubGroups + Group->SubGroups;
+       *SubGroups != -1; ++SubGroups) {
+    func(static_cast<size_t>(*SubGroups));
+    forEachSubGroupImpl(&OptionTable[*SubGroups], std::move(func));
+  }
+}
+
+template <class Func>
+static void forEachSubGroup(diag::Group Group, Func func) {
+  const WarningOption *WarningOpt = &OptionTable[static_cast<size_t>(Group)];
+  func(static_cast<size_t>(Group));
+  ::forEachSubGroupImpl(WarningOpt, std::move(func));
+}
+
+void DiagnosticIDs::setGroupSeverity(StringRef Group, diag::Severity Sev) {
+  if (std::optional<diag::Group> G = getGroupForWarningOption(Group)) {
+    ::forEachSubGroup(*G, [&](size_t SubGroup) {
+      GroupInfos[SubGroup].Severity = static_cast<unsigned>(Sev);
+    });
+  }
+}
+
+void DiagnosticIDs::setGroupNoWarningsAsError(StringRef Group, bool Val) {
+  if (std::optional<diag::Group> G = getGroupForWarningOption(Group)) {
+    ::forEachSubGroup(*G, [&](size_t SubGroup) {
+      GroupInfos[SubGroup].HasNoWarningAsError = Val;
+    });
+  }
+}
+
 void DiagnosticIDs::getAllDiagnostics(diag::Flavor Flavor,
                                       std::vector<diag::kind> &Diags) {
   for (unsigned i = 0; i != StaticDiagInfoSize; ++i)
@@ -730,7 +782,7 @@ StringRef DiagnosticIDs::getNearestOption(diag::Flavor Flavor,
 
     // Don't suggest groups that are not of this kind.
     llvm::SmallVector<diag::kind, 8> Diags;
-    if (::getDiagnosticsInGroup(Flavor, &O, Diags) || Diags.empty())
+    if (::getDiagnosticsInGroup(Flavor, &O, Diags, nullptr) || Diags.empty())
       continue;
 
     if (Distance == BestDistance) {
@@ -843,14 +895,8 @@ void DiagnosticIDs::EmitDiag(DiagnosticsEngine &Diag, Level DiagLevel) const {
 }
 
 bool DiagnosticIDs::isUnrecoverable(unsigned DiagID) const {
-  if (DiagID >= diag::DIAG_UPPER_LIMIT) {
-    assert(CustomDiagInfo && "Invalid CustomDiagInfo");
-    // Custom diagnostics.
-    return CustomDiagInfo->getLevel(DiagID) >= DiagnosticIDs::Error;
-  }
-
   // Only errors may be unrecoverable.
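The forEachSubGroup helpers introduced above walk the generated, -1-terminated subgroup tables recursively. A self-contained toy with made-up tables shows the traversal order; the real tables come from DiagnosticGroups.inc:

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct WarningOption { int SubGroups; }; // offset into the subgroup list

// Group 0 contains groups 1 and 2; groups 1 and 2 are leaves.
static const int16_t DiagSubGroups[] = {1, 2, -1, /*leaf:*/ -1, /*leaf:*/ -1};
static const WarningOption OptionTable[] = {{0}, {3}, {4}};

template <class Func>
static void forEachSubGroupImpl(const WarningOption *Group, Func F) {
  for (const int16_t *SG = DiagSubGroups + Group->SubGroups; *SG != -1; ++SG) {
    F(static_cast<std::size_t>(*SG));
    forEachSubGroupImpl(&OptionTable[*SG], F); // recurse into nested groups
  }
}

template <class Func>
static void forEachSubGroup(std::size_t Group, Func F) {
  F(Group); // visit the group itself, then its transitive subgroups
  forEachSubGroupImpl(&OptionTable[Group], F);
}

int main() {
  forEachSubGroup(0, [](std::size_t G) { std::printf("group %zu\n", G); });
  // prints: group 0, group 1, group 2
  return 0;
}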
- if (getBuiltinDiagClass(DiagID) < CLASS_ERROR) + if (getDiagClass(DiagID) < CLASS_ERROR) return false; if (DiagID == diag::err_unavailable || diff --git a/clang/lib/Basic/OperatorPrecedence.cpp b/clang/lib/Basic/OperatorPrecedence.cpp index 02876f1..c4e8fe9 100644 --- a/clang/lib/Basic/OperatorPrecedence.cpp +++ b/clang/lib/Basic/OperatorPrecedence.cpp @@ -52,7 +52,6 @@ prec::Level getBinOpPrecedence(tok::TokenKind Kind, bool GreaterThanIsOperator, case tok::pipeequal: return prec::Assignment; case tok::question: return prec::Conditional; case tok::pipepipe: return prec::LogicalOr; - case tok::caretcaret: case tok::ampamp: return prec::LogicalAnd; case tok::pipe: return prec::InclusiveOr; case tok::caret: return prec::ExclusiveOr; diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp index 7fa6942..d6fdd79 100644 --- a/clang/lib/CodeGen/BackendUtil.cpp +++ b/clang/lib/CodeGen/BackendUtil.cpp @@ -64,7 +64,6 @@ #include "llvm/Transforms/IPO/LowerTypeTests.h" #include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h" #include "llvm/Transforms/InstCombine/InstCombine.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Instrumentation/AddressSanitizer.h" #include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h" #include "llvm/Transforms/Instrumentation/BoundsChecking.h" diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp index 807f988..7a94c4d 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp @@ -7815,12 +7815,7 @@ private: const Expr *VarRef = nullptr, bool ForDeviceAddr = false) { if (SkipVarSet.contains(D)) return; - auto It = Info.find(D); - if (It == Info.end()) - It = Info - .insert(std::make_pair( - D, SmallVector<SmallVector<MapInfo, 8>, 4>(Total))) - .first; + auto It = Info.try_emplace(D, Total).first; It->second[Kind].emplace_back( L, MapType, MapModifiers, MotionModifiers, ReturnDevicePointer, IsImplicit, Mapper, VarRef, ForDeviceAddr); diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp index f58b816..502aba2 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -2753,6 +2753,25 @@ void tools::addMachineOutlinerArgs(const Driver &D, addArg(Twine("-enable-machine-outliner=never")); } } + + auto *CodeGenDataGenArg = + Args.getLastArg(options::OPT_fcodegen_data_generate_EQ); + auto *CodeGenDataUseArg = Args.getLastArg(options::OPT_fcodegen_data_use_EQ); + + // We only allow one of them to be specified. + if (CodeGenDataGenArg && CodeGenDataUseArg) + D.Diag(diag::err_drv_argument_not_allowed_with) + << CodeGenDataGenArg->getAsString(Args) + << CodeGenDataUseArg->getAsString(Args); + + // For codegen data gen, the output file is passed to the linker + // while a boolean flag is passed to the LLVM backend. + if (CodeGenDataGenArg) + addArg(Twine("-codegen-data-generate")); + + // For codegen data use, the input file is passed to the LLVM backend. 
+ if (CodeGenDataUseArg) + addArg(Twine("-codegen-data-use-path=") + CodeGenDataUseArg->getValue()); } void tools::addOpenMPDeviceRTL(const Driver &D, diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp index 5e7f929..ebc9ed1 100644 --- a/clang/lib/Driver/ToolChains/Darwin.cpp +++ b/clang/lib/Driver/ToolChains/Darwin.cpp @@ -476,6 +476,13 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args, llvm::sys::path::append(Path, "default.profdata"); CmdArgs.push_back(Args.MakeArgString(Twine("--cs-profile-path=") + Path)); } + + auto *CodeGenDataGenArg = + Args.getLastArg(options::OPT_fcodegen_data_generate_EQ); + if (CodeGenDataGenArg) + CmdArgs.push_back( + Args.MakeArgString(Twine("--codegen-data-generate-path=") + + CodeGenDataGenArg->getValue())); } } @@ -633,6 +640,32 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-mllvm"); CmdArgs.push_back("-enable-linkonceodr-outlining"); + // Propagate codegen data flags to the linker for the LLVM backend. + auto *CodeGenDataGenArg = + Args.getLastArg(options::OPT_fcodegen_data_generate_EQ); + auto *CodeGenDataUseArg = Args.getLastArg(options::OPT_fcodegen_data_use_EQ); + + // We only allow one of them to be specified. + const Driver &D = getToolChain().getDriver(); + if (CodeGenDataGenArg && CodeGenDataUseArg) + D.Diag(diag::err_drv_argument_not_allowed_with) + << CodeGenDataGenArg->getAsString(Args) + << CodeGenDataUseArg->getAsString(Args); + + // For codegen data gen, the output file is passed to the linker + // while a boolean flag is passed to the LLVM backend. + if (CodeGenDataGenArg) { + CmdArgs.push_back("-mllvm"); + CmdArgs.push_back("-codegen-data-generate"); + } + + // For codegen data use, the input file is passed to the LLVM backend. + if (CodeGenDataUseArg) { + CmdArgs.push_back("-mllvm"); + CmdArgs.push_back(Args.MakeArgString(Twine("-codegen-data-use-path=") + + CodeGenDataUseArg->getValue())); + } + // Setup statistics file output. SmallString<128> StatsFile = getStatsFileName(Args, Output, Inputs[0], getToolChain().getDriver()); diff --git a/clang/lib/Format/MacroExpander.cpp b/clang/lib/Format/MacroExpander.cpp index 5768ff3..fd2a168 100644 --- a/clang/lib/Format/MacroExpander.cpp +++ b/clang/lib/Format/MacroExpander.cpp @@ -191,9 +191,10 @@ MacroExpander::expand(FormatToken *ID, auto expandArgument = [&](FormatToken *Tok) -> bool { // If the current token references a parameter, expand the corresponding // argument. - if (Tok->isNot(tok::identifier) || ExpandedArgs.contains(Tok->TokenText)) + if (Tok->isNot(tok::identifier)) + return false; + if (!ExpandedArgs.insert(Tok->TokenText).second) return false; - ExpandedArgs.insert(Tok->TokenText); auto I = Def.ArgMap.find(Tok->TokenText); if (I == Def.ArgMap.end()) return false; diff --git a/clang/lib/Frontend/LogDiagnosticPrinter.cpp b/clang/lib/Frontend/LogDiagnosticPrinter.cpp index 469d1c2..4e963af 100644 --- a/clang/lib/Frontend/LogDiagnosticPrinter.cpp +++ b/clang/lib/Frontend/LogDiagnosticPrinter.cpp @@ -129,7 +129,8 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level, DE.DiagnosticLevel = Level; DE.WarningOption = - std::string(DiagnosticIDs::getWarningOptionForDiag(DE.DiagnosticID)); + std::string(Info.getDiags()->getDiagnosticIDs()->getWarningOptionForDiag( + DE.DiagnosticID)); // Format the message. 
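Summarizing the driver plumbing in the two hunks above (flag spellings are taken from the patch; the struct and function below are hypothetical): -fcodegen-data-generate= hands a path to the linker and a boolean option to the LLVM backend, -fcodegen-data-use= hands a path straight to the backend, and combining the two is a driver error.

#include <optional>
#include <string>
#include <vector>

struct CGDataOpts {
  std::optional<std::string> GeneratePath; // -fcodegen-data-generate=<path>
  std::optional<std::string> UsePath;      // -fcodegen-data-use=<path>
};

bool lowerCGDataOpts(const CGDataOpts &O,
                     std::vector<std::string> &LinkerArgs) {
  if (O.GeneratePath && O.UsePath)
    return false; // mutually exclusive; the driver emits
                  // err_drv_argument_not_allowed_with here
  if (O.GeneratePath) {
    LinkerArgs.push_back("--codegen-data-generate-path=" + *O.GeneratePath);
    LinkerArgs.push_back("-mllvm");
    LinkerArgs.push_back("-codegen-data-generate"); // backend boolean
  }
  if (O.UsePath) {
    LinkerArgs.push_back("-mllvm");
    LinkerArgs.push_back("-codegen-data-use-path=" + *O.UsePath);
  }
  return true;
}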
SmallString<100> MessageStr; @@ -160,4 +161,3 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level, // Record the diagnostic entry. Entries.push_back(DE); } - diff --git a/clang/lib/Frontend/Rewrite/RewriteObjC.cpp b/clang/lib/Frontend/Rewrite/RewriteObjC.cpp index 9db6ddb..fd5e8dc 100644 --- a/clang/lib/Frontend/Rewrite/RewriteObjC.cpp +++ b/clang/lib/Frontend/Rewrite/RewriteObjC.cpp @@ -4111,9 +4111,8 @@ void RewriteObjC::RewriteBlockPointerDecl(NamedDecl *ND) { std::string RewriteObjC::SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag) { std::string S; - if (CopyDestroyCache.count(flag)) + if (!CopyDestroyCache.insert(flag).second) return S; - CopyDestroyCache.insert(flag); S = "static void __Block_byref_id_object_copy_"; S += utostr(flag); S += "(void *dst, void *src) {\n"; diff --git a/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp index 0887b5a..d1db317 100644 --- a/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp +++ b/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp @@ -202,7 +202,7 @@ private: /// Emit the string information for diagnostic flags. unsigned getEmitDiagnosticFlag(DiagnosticsEngine::Level DiagLevel, - unsigned DiagID = 0); + const Diagnostic *Diag = nullptr); unsigned getEmitDiagnosticFlag(StringRef DiagName); @@ -536,11 +536,13 @@ unsigned SDiagsWriter::getEmitCategory(unsigned int category) { } unsigned SDiagsWriter::getEmitDiagnosticFlag(DiagnosticsEngine::Level DiagLevel, - unsigned DiagID) { - if (DiagLevel == DiagnosticsEngine::Note) + const Diagnostic *Diag) { + if (!Diag || DiagLevel == DiagnosticsEngine::Note) return 0; // No flag for notes. - StringRef FlagName = DiagnosticIDs::getWarningOptionForDiag(DiagID); + StringRef FlagName = + Diag->getDiags()->getDiagnosticIDs()->getWarningOptionForDiag( + Diag->getID()); return getEmitDiagnosticFlag(FlagName); } @@ -655,7 +657,7 @@ void SDiagsWriter::EmitDiagnosticMessage(FullSourceLoc Loc, PresumedLoc PLoc, unsigned DiagID = DiagnosticIDs::getCategoryNumberForDiag(Info->getID()); Record.push_back(getEmitCategory(DiagID)); // Emit the diagnostic flag string lazily and get the mapped ID. - Record.push_back(getEmitDiagnosticFlag(Level, Info->getID())); + Record.push_back(getEmitDiagnosticFlag(Level, Info)); } else { Record.push_back(getEmitCategory()); Record.push_back(getEmitDiagnosticFlag(Level)); diff --git a/clang/lib/Frontend/TextDiagnosticPrinter.cpp b/clang/lib/Frontend/TextDiagnosticPrinter.cpp index b2fb762..c2fea3d 100644 --- a/clang/lib/Frontend/TextDiagnosticPrinter.cpp +++ b/clang/lib/Frontend/TextDiagnosticPrinter.cpp @@ -70,13 +70,17 @@ static void printDiagnosticOptions(raw_ostream &OS, // flag it as such. Note that diagnostics could also have been mapped by a // pragma, but we don't currently have a way to distinguish this. if (Level == DiagnosticsEngine::Error && - DiagnosticIDs::isBuiltinWarningOrExtension(Info.getID()) && - !DiagnosticIDs::isDefaultMappingAsError(Info.getID())) { + Info.getDiags()->getDiagnosticIDs()->isWarningOrExtension( + Info.getID()) && + !Info.getDiags()->getDiagnosticIDs()->isDefaultMappingAsError( + Info.getID())) { OS << " [-Werror"; Started = true; } - StringRef Opt = DiagnosticIDs::getWarningOptionForDiag(Info.getID()); + StringRef Opt = + Info.getDiags()->getDiagnosticIDs()->getWarningOptionForDiag( + Info.getID()); if (!Opt.empty()) { OS << (Started ? "," : " [") << (Level == DiagnosticsEngine::Remark ? 
"-R" : "-W") << Opt; diff --git a/clang/lib/Headers/arm_acle.h b/clang/lib/Headers/arm_acle.h index 1518b0c..b1dc90f 100644 --- a/clang/lib/Headers/arm_acle.h +++ b/clang/lib/Headers/arm_acle.h @@ -264,28 +264,28 @@ __rbitl(unsigned long __t) { } /* 8.3 16-bit multiplications */ -#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP -static __inline__ int32_t __attribute__((__always_inline__,__nodebug__)) +#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE +static __inline__ int32_t __attribute__((__always_inline__,__nodebug__, target("dsp"))) __smulbb(int32_t __a, int32_t __b) { return __builtin_arm_smulbb(__a, __b); } -static __inline__ int32_t __attribute__((__always_inline__,__nodebug__)) +static __inline__ int32_t __attribute__((__always_inline__,__nodebug__, target("dsp"))) __smulbt(int32_t __a, int32_t __b) { return __builtin_arm_smulbt(__a, __b); } -static __inline__ int32_t __attribute__((__always_inline__,__nodebug__)) +static __inline__ int32_t __attribute__((__always_inline__,__nodebug__, target("dsp"))) __smultb(int32_t __a, int32_t __b) { return __builtin_arm_smultb(__a, __b); } -static __inline__ int32_t __attribute__((__always_inline__,__nodebug__)) +static __inline__ int32_t __attribute__((__always_inline__,__nodebug__, target("dsp"))) __smultt(int32_t __a, int32_t __b) { return __builtin_arm_smultt(__a, __b); } -static __inline__ int32_t __attribute__((__always_inline__,__nodebug__)) +static __inline__ int32_t __attribute__((__always_inline__,__nodebug__, target("dsp"))) __smulwb(int32_t __a, int32_t __b) { return __builtin_arm_smulwb(__a, __b); } -static __inline__ int32_t __attribute__((__always_inline__,__nodebug__)) +static __inline__ int32_t __attribute__((__always_inline__,__nodebug__, target("dsp"))) __smulwt(int32_t __a, int32_t __b) { return __builtin_arm_smulwt(__a, __b); } @@ -304,46 +304,46 @@ __smulwt(int32_t __a, int32_t __b) { #endif /* 8.4.2 Saturating addition and subtraction intrinsics */ -#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP -static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp"))) __qadd(int32_t __t, int32_t __v) { return __builtin_arm_qadd(__t, __v); } -static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp"))) __qsub(int32_t __t, int32_t __v) { return __builtin_arm_qsub(__t, __v); } -static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp"))) __qdbl(int32_t __t) { return __builtin_arm_qadd(__t, __t); } #endif /* 8.4.3 Accumulating multiplications */ -#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP -static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp"))) __smlabb(int32_t __a, int32_t __b, int32_t __c) { return __builtin_arm_smlabb(__a, __b, __c); } -static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp"))) __smlabt(int32_t __a, int32_t __b, int32_t __c) { return __builtin_arm_smlabt(__a, __b, __c); } -static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ int32_t 
__attribute__((__always_inline__, __nodebug__, target("dsp"))) __smlatb(int32_t __a, int32_t __b, int32_t __c) { return __builtin_arm_smlatb(__a, __b, __c); } -static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp"))) __smlatt(int32_t __a, int32_t __b, int32_t __c) { return __builtin_arm_smlatt(__a, __b, __c); } -static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp"))) __smlawb(int32_t __a, int32_t __b, int32_t __c) { return __builtin_arm_smlawb(__a, __b, __c); } -static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp"))) __smlawt(int32_t __a, int32_t __b, int32_t __c) { return __builtin_arm_smlawt(__a, __b, __c); } @@ -621,8 +621,6 @@ __rintnf(float __a) { #endif /* 8.8 CRC32 intrinsics */ -#if (defined(__ARM_FEATURE_CRC32) && __ARM_FEATURE_CRC32) || \ - (defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE) static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) __crc32b(uint32_t __a, uint8_t __b) { return __builtin_arm_crc32b(__a, __b); @@ -662,7 +660,6 @@ static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target __crc32cd(uint32_t __a, uint64_t __b) { return __builtin_arm_crc32cd(__a, __b); } -#endif /* 8.6 Floating-point data-processing intrinsics */ /* Armv8.3-A Javascript conversion intrinsic */ diff --git a/clang/lib/Headers/llvm_libc_wrappers/ctype.h b/clang/lib/Headers/llvm_libc_wrappers/ctype.h index 49c2af9..960cf43 100644 --- a/clang/lib/Headers/llvm_libc_wrappers/ctype.h +++ b/clang/lib/Headers/llvm_libc_wrappers/ctype.h @@ -51,6 +51,19 @@ #pragma push_macro("toascii") #pragma push_macro("tolower") #pragma push_macro("toupper") +#pragma push_macro("isalnum_l") +#pragma push_macro("isalpha_l") +#pragma push_macro("isascii_l") +#pragma push_macro("isblank_l") +#pragma push_macro("iscntrl_l") +#pragma push_macro("isdigit_l") +#pragma push_macro("isgraph_l") +#pragma push_macro("islower_l") +#pragma push_macro("isprint_l") +#pragma push_macro("ispunct_l") +#pragma push_macro("isspace_l") +#pragma push_macro("isupper_l") +#pragma push_macro("isxdigit_l") #undef isalnum #undef isalpha @@ -68,6 +81,18 @@ #undef toascii #undef tolower #undef toupper +#undef isalnum_l +#undef isalpha_l +#undef iscntrl_l +#undef isdigit_l +#undef islower_l +#undef isgraph_l +#undef isprint_l +#undef ispunct_l +#undef isspace_l +#undef isupper_l +#undef isblank_l +#undef isxdigit_l #pragma omp begin declare target @@ -93,6 +118,19 @@ #pragma pop_macro("toascii") #pragma pop_macro("tolower") #pragma pop_macro("toupper") +#pragma pop_macro("isalnum_l") +#pragma pop_macro("isalpha_l") +#pragma pop_macro("isascii_l") +#pragma pop_macro("isblank_l") +#pragma pop_macro("iscntrl_l") +#pragma pop_macro("isdigit_l") +#pragma pop_macro("isgraph_l") +#pragma pop_macro("islower_l") +#pragma pop_macro("isprint_l") +#pragma pop_macro("ispunct_l") +#pragma pop_macro("isspace_l") +#pragma pop_macro("isupper_l") +#pragma pop_macro("isxdigit_l") #endif #undef __LIBC_ATTRS diff --git a/clang/lib/Lex/Lexer.cpp b/clang/lib/Lex/Lexer.cpp index 8647e9f..12cb460 100644 --- a/clang/lib/Lex/Lexer.cpp +++ b/clang/lib/Lex/Lexer.cpp @@ -4325,10 +4325,9 @@ LexStart: if (Char == '=') { CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); Kind = tok::caretequal; - } else if 
(LangOpts.OpenCL && Char == '^') { - CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); - Kind = tok::caretcaret; } else { + if (LangOpts.OpenCL && Char == '^') + Diag(CurPtr, diag::err_opencl_logical_exclusive_or); Kind = tok::caret; } break; diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp index 6370da1..6f0f5a0 100644 --- a/clang/lib/Parse/ParseDeclCXX.cpp +++ b/clang/lib/Parse/ParseDeclCXX.cpp @@ -1109,7 +1109,8 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd) { } } - T.consumeClose(); + if (T.consumeClose()) + return nullptr; DeclEnd = Tok.getLocation(); ExpectAndConsumeSemi(diag::err_expected_semi_after_static_assert, TokName); diff --git a/clang/lib/Parse/ParseExpr.cpp b/clang/lib/Parse/ParseExpr.cpp index 64f284d..e751450 100644 --- a/clang/lib/Parse/ParseExpr.cpp +++ b/clang/lib/Parse/ParseExpr.cpp @@ -446,10 +446,6 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) { Token OpToken = Tok; ConsumeToken(); - if (OpToken.is(tok::caretcaret)) { - return ExprError(Diag(Tok, diag::err_opencl_logical_exclusive_or)); - } - // If we're potentially in a template-id, we may now be able to determine // whether we're actually in one or not. if (OpToken.isOneOf(tok::comma, tok::greater, tok::greatergreater, diff --git a/clang/lib/Sema/CheckExprLifetime.cpp b/clang/lib/Sema/CheckExprLifetime.cpp index f62e185..c98fbca 100644 --- a/clang/lib/Sema/CheckExprLifetime.cpp +++ b/clang/lib/Sema/CheckExprLifetime.cpp @@ -269,7 +269,8 @@ static bool isInStlNamespace(const Decl *D) { static bool shouldTrackImplicitObjectArg(const CXXMethodDecl *Callee) { if (auto *Conv = dyn_cast_or_null<CXXConversionDecl>(Callee)) - if (isRecordWithAttr<PointerAttr>(Conv->getConversionType())) + if (isRecordWithAttr<PointerAttr>(Conv->getConversionType()) && + Callee->getParent()->hasAttr<OwnerAttr>()) return true; if (!isInStlNamespace(Callee->getParent())) return false; diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp index 46ddd36..d567de7 100644 --- a/clang/lib/Sema/Sema.cpp +++ b/clang/lib/Sema/Sema.cpp @@ -1683,7 +1683,7 @@ void Sema::EmitCurrentDiagnostic(unsigned DiagID) { // that is different from the last template instantiation where // we emitted an error, print a template instantiation // backtrace. 
- if (!DiagnosticIDs::isBuiltinNote(DiagID)) + if (!Diags.getDiagnosticIDs()->isNote(DiagID)) PrintContextStack(); } @@ -1697,7 +1697,8 @@ bool Sema::hasUncompilableErrorOccurred() const { if (Loc == DeviceDeferredDiags.end()) return false; for (auto PDAt : Loc->second) { - if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID())) + if (Diags.getDiagnosticIDs()->isDefaultMappingAsError( + PDAt.second.getDiagID())) return true; } return false; diff --git a/clang/lib/Sema/SemaCUDA.cpp b/clang/lib/Sema/SemaCUDA.cpp index ec37c0d..fbb3de4 100644 --- a/clang/lib/Sema/SemaCUDA.cpp +++ b/clang/lib/Sema/SemaCUDA.cpp @@ -835,7 +835,7 @@ SemaBase::SemaDiagnosticBuilder SemaCUDA::DiagIfDeviceCode(SourceLocation Loc, if (!getLangOpts().CUDAIsDevice) return SemaDiagnosticBuilder::K_Nop; if (SemaRef.IsLastErrorImmediate && - getDiagnostics().getDiagnosticIDs()->isBuiltinNote(DiagID)) + getDiagnostics().getDiagnosticIDs()->isNote(DiagID)) return SemaDiagnosticBuilder::K_Immediate; return (SemaRef.getEmissionStatus(CurFunContext) == Sema::FunctionEmissionStatus::Emitted) @@ -866,7 +866,7 @@ Sema::SemaDiagnosticBuilder SemaCUDA::DiagIfHostCode(SourceLocation Loc, if (getLangOpts().CUDAIsDevice) return SemaDiagnosticBuilder::K_Nop; if (SemaRef.IsLastErrorImmediate && - getDiagnostics().getDiagnosticIDs()->isBuiltinNote(DiagID)) + getDiagnostics().getDiagnosticIDs()->isNote(DiagID)) return SemaDiagnosticBuilder::K_Immediate; return (SemaRef.getEmissionStatus(CurFunContext) == Sema::FunctionEmissionStatus::Emitted) diff --git a/clang/lib/Sema/SemaCodeComplete.cpp b/clang/lib/Sema/SemaCodeComplete.cpp index 88d4732..3e31f3d 100644 --- a/clang/lib/Sema/SemaCodeComplete.cpp +++ b/clang/lib/Sema/SemaCodeComplete.cpp @@ -520,7 +520,6 @@ static QualType getPreferredTypeOfBinaryRHS(Sema &S, Expr *LHS, // Logical operators, assume we want bool. case tok::ampamp: case tok::pipepipe: - case tok::caretcaret: return S.getASTContext().BoolTy; // Operators often used for bit manipulation are typically used with the type // of the left argument. 
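A mechanical change repeated across these call sites: queries such as isNote and isDefaultMappingAsError stopped being static and are now reached through the engine's DiagnosticIDs instance, because custom diagnostics make the answer depend on per-engine state. A toy illustration of that design choice (not Clang's actual logic):

#include <map>

// Once IDs can be registered at run time, a query like isNote can no longer
// be a static function over a fixed compile-time table.
class DiagTable {
  std::map<unsigned, bool> CustomIsNote; // per-engine registrations

public:
  void registerCustom(unsigned ID, bool IsNote) { CustomIsNote[ID] = IsNote; }

  // Two engines may answer differently for the same ID, so this needs *this*.
  bool isNote(unsigned ID) const {
    auto It = CustomIsNote.find(ID);
    return It != CustomIsNote.end() && It->second;
  }
};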
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp index c9b9f3a..14cc51c 100644 --- a/clang/lib/Sema/SemaDeclAttr.cpp +++ b/clang/lib/Sema/SemaDeclAttr.cpp @@ -852,22 +852,38 @@ static void handleDiagnoseIfAttr(Sema &S, Decl *D, const ParsedAttr &AL) { if (!checkFunctionConditionAttr(S, D, AL, Cond, Msg)) return; - StringRef DiagTypeStr; - if (!S.checkStringLiteralArgumentAttr(AL, 2, DiagTypeStr)) + StringRef DefaultSevStr; + if (!S.checkStringLiteralArgumentAttr(AL, 2, DefaultSevStr)) return; - DiagnoseIfAttr::DiagnosticType DiagType; - if (!DiagnoseIfAttr::ConvertStrToDiagnosticType(DiagTypeStr, DiagType)) { + DiagnoseIfAttr::DefaultSeverity DefaultSev; + if (!DiagnoseIfAttr::ConvertStrToDefaultSeverity(DefaultSevStr, DefaultSev)) { S.Diag(AL.getArgAsExpr(2)->getBeginLoc(), diag::err_diagnose_if_invalid_diagnostic_type); return; } + StringRef WarningGroup; + SmallVector<StringRef, 2> Options; + if (AL.getNumArgs() > 3) { + if (!S.checkStringLiteralArgumentAttr(AL, 3, WarningGroup)) + return; + if (WarningGroup.empty() || + !S.getDiagnostics().getDiagnosticIDs()->getGroupForWarningOption( + WarningGroup)) { + S.Diag(AL.getArgAsExpr(3)->getBeginLoc(), + diag::err_diagnose_if_unknown_warning) + << WarningGroup; + return; + } + } + bool ArgDependent = false; if (const auto *FD = dyn_cast<FunctionDecl>(D)) ArgDependent = ArgumentDependenceChecker(FD).referencesArgs(Cond); D->addAttr(::new (S.Context) DiagnoseIfAttr( - S.Context, AL, Cond, Msg, DiagType, ArgDependent, cast<NamedDecl>(D))); + S.Context, AL, Cond, Msg, DefaultSev, WarningGroup, ArgDependent, + cast<NamedDecl>(D))); } static void handleNoBuiltinAttr(Sema &S, Decl *D, const ParsedAttr &AL) { diff --git a/clang/lib/Sema/SemaModule.cpp b/clang/lib/Sema/SemaModule.cpp index 3b84e7b..d6ebc38 100644 --- a/clang/lib/Sema/SemaModule.cpp +++ b/clang/lib/Sema/SemaModule.cpp @@ -650,6 +650,14 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc, else VisibleModules.setVisible(Mod, ImportLoc); + assert((!Mod->isModulePartitionImplementation() || getCurrentModule()) && + "We can only import a partition unit in a named module."); + if (Mod->isModulePartitionImplementation() && + getCurrentModule()->isModuleInterfaceUnit()) + Diag(ImportLoc, + diag::warn_import_implementation_partition_unit_in_interface_unit) + << Mod->Name; + checkModuleImportContext(*this, Mod, ImportLoc, CurContext); // FIXME: we should support importing a submodule within a different submodule diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp index a155bb2..d304f32 100644 --- a/clang/lib/Sema/SemaOverload.cpp +++ b/clang/lib/Sema/SemaOverload.cpp @@ -6567,29 +6567,22 @@ static void collectViableConversionCandidates(Sema &SemaRef, Expr *From, QualType ToType, UnresolvedSetImpl &ViableConversions, OverloadCandidateSet &CandidateSet) { - for (unsigned I = 0, N = ViableConversions.size(); I != N; ++I) { - DeclAccessPair FoundDecl = ViableConversions[I]; + for (const DeclAccessPair &FoundDecl : ViableConversions.pairs()) { NamedDecl *D = FoundDecl.getDecl(); CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext()); if (isa<UsingShadowDecl>(D)) D = cast<UsingShadowDecl>(D)->getTargetDecl(); - CXXConversionDecl *Conv; - FunctionTemplateDecl *ConvTemplate; - if ((ConvTemplate = dyn_cast<FunctionTemplateDecl>(D))) - Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl()); - else - Conv = cast<CXXConversionDecl>(D); - - if (ConvTemplate) + if (auto *ConvTemplate = 
dyn_cast<FunctionTemplateDecl>(D)) { SemaRef.AddTemplateConversionCandidate( ConvTemplate, FoundDecl, ActingContext, From, ToType, CandidateSet, - /*AllowObjCConversionOnExplicit=*/false, /*AllowExplicit*/ true); - else - SemaRef.AddConversionCandidate(Conv, FoundDecl, ActingContext, From, - ToType, CandidateSet, - /*AllowObjCConversionOnExplicit=*/false, - /*AllowExplicit*/ true); + /*AllowObjCConversionOnExplicit=*/false, /*AllowExplicit=*/true); + continue; + } + CXXConversionDecl *Conv = cast<CXXConversionDecl>(D); + SemaRef.AddConversionCandidate( + Conv, FoundDecl, ActingContext, From, ToType, CandidateSet, + /*AllowObjCConversionOnExplicit=*/false, /*AllowExplicit=*/true); } } @@ -7307,8 +7300,10 @@ static bool diagnoseDiagnoseIfAttrsWith(Sema &S, const NamedDecl *ND, return false; auto WarningBegin = std::stable_partition( - Attrs.begin(), Attrs.end(), - [](const DiagnoseIfAttr *DIA) { return DIA->isError(); }); + Attrs.begin(), Attrs.end(), [](const DiagnoseIfAttr *DIA) { + return DIA->getDefaultSeverity() == DiagnoseIfAttr::DS_error && + DIA->getWarningGroup().empty(); + }); // Note that diagnose_if attributes are late-parsed, so they appear in the // correct order (unlike enable_if attributes). @@ -7322,11 +7317,32 @@ static bool diagnoseDiagnoseIfAttrsWith(Sema &S, const NamedDecl *ND, return true; } + auto ToSeverity = [](DiagnoseIfAttr::DefaultSeverity Sev) { + switch (Sev) { + case DiagnoseIfAttr::DS_warning: + return diag::Severity::Warning; + case DiagnoseIfAttr::DS_error: + return diag::Severity::Error; + } + llvm_unreachable("Fully covered switch above!"); + }; + for (const auto *DIA : llvm::make_range(WarningBegin, Attrs.end())) if (IsSuccessful(DIA)) { - S.Diag(Loc, diag::warn_diagnose_if_succeeded) << DIA->getMessage(); - S.Diag(DIA->getLocation(), diag::note_from_diagnose_if) - << DIA->getParent() << DIA->getCond()->getSourceRange(); + if (DIA->getWarningGroup().empty() && + DIA->getDefaultSeverity() == DiagnoseIfAttr::DS_warning) { + S.Diag(Loc, diag::warn_diagnose_if_succeeded) << DIA->getMessage(); + S.Diag(DIA->getLocation(), diag::note_from_diagnose_if) + << DIA->getParent() << DIA->getCond()->getSourceRange(); + } else { + auto DiagGroup = S.Diags.getDiagnosticIDs()->getGroupForWarningOption( + DIA->getWarningGroup()); + assert(DiagGroup); + auto DiagID = S.Diags.getDiagnosticIDs()->getCustomDiagID( + {ToSeverity(DIA->getDefaultSeverity()), "%0", + DiagnosticIDs::CLASS_WARNING, false, false, *DiagGroup}); + S.Diag(Loc, DiagID) << DIA->getMessage(); + } } return false; diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp index 562c57a..b50648d 100644 --- a/clang/lib/Sema/SemaTemplateDeduction.cpp +++ b/clang/lib/Sema/SemaTemplateDeduction.cpp @@ -3411,7 +3411,7 @@ DeduceTemplateArguments(Sema &S, T *Partial, if (TemplateDeductionResult Result = ::DeduceTemplateArguments( S, Partial->getTemplateParameters(), Partial->getTemplateArgs().asArray(), TemplateArgs, Info, Deduced, - /*NumberOfArgumentsMustMatch=*/false, /*PartialOrdering=*/true, + /*NumberOfArgumentsMustMatch=*/false, /*PartialOrdering=*/false, PackFold::ParameterToArgument, /*HasDeducedAnyParam=*/nullptr); Result != TemplateDeductionResult::Success) diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp index bb311e3..e97a7d7 100644 --- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp +++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp @@ -284,7 +284,8 @@ static void instantiateDependentDiagnoseIfAttr( if 
(Cond) New->addAttr(new (S.getASTContext()) DiagnoseIfAttr( S.getASTContext(), *DIA, Cond, DIA->getMessage(), - DIA->getDiagnosticType(), DIA->getArgDependent(), New)); + DIA->getDefaultSeverity(), DIA->getWarningGroup(), + DIA->getArgDependent(), New)); } // Constructs and adds to New a new instance of CUDALaunchBoundsAttr using diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp index e5a1e20..4fae6ff 100644 --- a/clang/lib/Serialization/ASTReader.cpp +++ b/clang/lib/Serialization/ASTReader.cpp @@ -6641,7 +6641,7 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) { // command line (-w, -Weverything, -Werror, ...) along with any explicit // -Wblah flags. unsigned Flags = Record[Idx++]; - DiagState Initial; + DiagState Initial(*Diag.getDiagnosticIDs()); Initial.SuppressSystemWarnings = Flags & 1; Flags >>= 1; Initial.ErrorsAsFatal = Flags & 1; Flags >>= 1; Initial.WarningsAsErrors = Flags & 1; Flags >>= 1; diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp index c628949..008bf57 100644 --- a/clang/lib/Serialization/ASTWriter.cpp +++ b/clang/lib/Serialization/ASTWriter.cpp @@ -3219,7 +3219,7 @@ void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag, // Skip default mappings. We have a mapping for every diagnostic ever // emitted, regardless of whether it was customized. if (!I.second.isPragma() && - I.second == DiagnosticIDs::getDefaultMapping(I.first)) + I.second == Diag.getDiagnosticIDs()->getDefaultMapping(I.first)) continue; Mappings.push_back(I); } diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp index 3ddcb7e..81ec8e1 100644 --- a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp @@ -315,13 +315,24 @@ struct ReallocPair { REGISTER_MAP_WITH_PROGRAMSTATE(ReallocPairs, SymbolRef, ReallocPair) -/// Tells if the callee is one of the builtin new/delete operators, including -/// placement operators and other standard overloads. -static bool isStandardNewDelete(const FunctionDecl *FD); -static bool isStandardNewDelete(const CallEvent &Call) { +static bool isStandardNew(const FunctionDecl *FD); +static bool isStandardNew(const CallEvent &Call) { if (!Call.getDecl() || !isa<FunctionDecl>(Call.getDecl())) return false; - return isStandardNewDelete(cast<FunctionDecl>(Call.getDecl())); + return isStandardNew(cast<FunctionDecl>(Call.getDecl())); +} + +static bool isStandardDelete(const FunctionDecl *FD); +static bool isStandardDelete(const CallEvent &Call) { + if (!Call.getDecl() || !isa<FunctionDecl>(Call.getDecl())) + return false; + return isStandardDelete(cast<FunctionDecl>(Call.getDecl())); +} + +/// Tells if the callee is one of the builtin new/delete operators, including +/// placement operators and other standard overloads. 
+template <typename T> static bool isStandardNewDelete(const T &FD) { + return isStandardDelete(FD) || isStandardNew(FD); } //===----------------------------------------------------------------------===// @@ -334,8 +345,9 @@ class MallocChecker : public Checker<check::DeadSymbols, check::PointerEscape, check::ConstPointerEscape, check::PreStmt<ReturnStmt>, check::EndFunction, check::PreCall, check::PostCall, - check::NewAllocator, check::PostStmt<BlockExpr>, - check::PostObjCMessage, check::Location, eval::Assume> { + eval::Call, check::NewAllocator, + check::PostStmt<BlockExpr>, check::PostObjCMessage, + check::Location, eval::Assume> { public: /// In pessimistic mode, the checker assumes that it does not know which /// functions might free the memory. @@ -367,6 +379,7 @@ public: void checkPreCall(const CallEvent &Call, CheckerContext &C) const; void checkPostCall(const CallEvent &Call, CheckerContext &C) const; + bool evalCall(const CallEvent &Call, CheckerContext &C) const; void checkNewAllocator(const CXXAllocatorCall &Call, CheckerContext &C) const; void checkPostObjCMessage(const ObjCMethodCall &Call, CheckerContext &C) const; void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const; @@ -403,7 +416,8 @@ private: mutable std::unique_ptr<BugType> BT_TaintedAlloc; #define CHECK_FN(NAME) \ - void NAME(const CallEvent &Call, CheckerContext &C) const; + void NAME(ProgramStateRef State, const CallEvent &Call, CheckerContext &C) \ + const; CHECK_FN(checkFree) CHECK_FN(checkIfNameIndex) @@ -423,11 +437,12 @@ private: CHECK_FN(checkReallocN) CHECK_FN(checkOwnershipAttr) - void checkRealloc(const CallEvent &Call, CheckerContext &C, - bool ShouldFreeOnFail) const; + void checkRealloc(ProgramStateRef State, const CallEvent &Call, + CheckerContext &C, bool ShouldFreeOnFail) const; - using CheckFn = std::function<void(const MallocChecker *, - const CallEvent &Call, CheckerContext &C)>; + using CheckFn = + std::function<void(const MallocChecker *, ProgramStateRef State, + const CallEvent &Call, CheckerContext &C)>; const CallDescriptionMap<CheckFn> PreFnMap{ // NOTE: the following CallDescription also matches the C++ standard @@ -436,6 +451,13 @@ private: {{CDM::CLibrary, {"getdelim"}, 4}, &MallocChecker::preGetdelim}, }; + const CallDescriptionMap<CheckFn> PostFnMap{ + // NOTE: the following CallDescription also matches the C++ standard + // library function std::getline(); the callback will filter it out. 
+ {{CDM::CLibrary, {"getline"}, 3}, &MallocChecker::checkGetdelim}, + {{CDM::CLibrary, {"getdelim"}, 4}, &MallocChecker::checkGetdelim}, + }; + const CallDescriptionMap<CheckFn> FreeingMemFnMap{ {{CDM::CLibrary, {"free"}, 1}, &MallocChecker::checkFree}, {{CDM::CLibrary, {"if_freenameindex"}, 1}, @@ -446,10 +468,13 @@ private: bool isFreeingCall(const CallEvent &Call) const; static bool isFreeingOwnershipAttrCall(const FunctionDecl *Func); + static bool isFreeingOwnershipAttrCall(const CallEvent &Call); + static bool isAllocatingOwnershipAttrCall(const FunctionDecl *Func); + static bool isAllocatingOwnershipAttrCall(const CallEvent &Call); friend class NoMemOwnershipChangeVisitor; - CallDescriptionMap<CheckFn> AllocatingMemFnMap{ + CallDescriptionMap<CheckFn> AllocaMemFnMap{ {{CDM::CLibrary, {"alloca"}, 1}, &MallocChecker::checkAlloca}, {{CDM::CLibrary, {"_alloca"}, 1}, &MallocChecker::checkAlloca}, // The line for "alloca" also covers "__builtin_alloca", but the @@ -457,6 +482,9 @@ private: // extra argument: {{CDM::CLibrary, {"__builtin_alloca_with_align"}, 2}, &MallocChecker::checkAlloca}, + }; + + CallDescriptionMap<CheckFn> AllocatingMemFnMap{ {{CDM::CLibrary, {"malloc"}, 1}, &MallocChecker::checkBasicAlloc}, {{CDM::CLibrary, {"malloc"}, 3}, &MallocChecker::checkKernelMalloc}, {{CDM::CLibrary, {"calloc"}, 2}, &MallocChecker::checkCalloc}, @@ -481,23 +509,20 @@ private: CallDescriptionMap<CheckFn> ReallocatingMemFnMap{ {{CDM::CLibrary, {"realloc"}, 2}, - std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)}, + std::bind(&MallocChecker::checkRealloc, _1, _2, _3, _4, false)}, {{CDM::CLibrary, {"reallocf"}, 2}, - std::bind(&MallocChecker::checkRealloc, _1, _2, _3, true)}, + std::bind(&MallocChecker::checkRealloc, _1, _2, _3, _4, true)}, {{CDM::CLibrary, {"g_realloc"}, 2}, - std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)}, + std::bind(&MallocChecker::checkRealloc, _1, _2, _3, _4, false)}, {{CDM::CLibrary, {"g_try_realloc"}, 2}, - std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)}, + std::bind(&MallocChecker::checkRealloc, _1, _2, _3, _4, false)}, {{CDM::CLibrary, {"g_realloc_n"}, 3}, &MallocChecker::checkReallocN}, {{CDM::CLibrary, {"g_try_realloc_n"}, 3}, &MallocChecker::checkReallocN}, - - // NOTE: the following CallDescription also matches the C++ standard - // library function std::getline(); the callback will filter it out. - {{CDM::CLibrary, {"getline"}, 3}, &MallocChecker::checkGetdelim}, - {{CDM::CLibrary, {"getdelim"}, 4}, &MallocChecker::checkGetdelim}, }; bool isMemCall(const CallEvent &Call) const; + bool hasOwnershipReturns(const CallEvent &Call) const; + bool hasOwnershipTakesHolds(const CallEvent &Call) const; void reportTaintBug(StringRef Msg, ProgramStateRef State, CheckerContext &C, llvm::ArrayRef<SymbolRef> TaintedSyms, AllocationFamily Family) const; @@ -531,8 +556,8 @@ private: /// \param [in] RetVal Specifies the newly allocated pointer value; /// if unspecified, the value of expression \p E is used. [[nodiscard]] static ProgramStateRef - ProcessZeroAllocCheck(const CallEvent &Call, const unsigned IndexOfSizeArg, - ProgramStateRef State, + ProcessZeroAllocCheck(CheckerContext &C, const CallEvent &Call, + const unsigned IndexOfSizeArg, ProgramStateRef State, std::optional<SVal> RetVal = std::nullopt); /// Model functions with the ownership_returns attribute. 
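The maps above implement table-driven dispatch: CallDescriptionMap keys callbacks by callee name and arity, and getline/getdelim now live in a dedicated PostFnMap rather than the reallocating map. A toy of the same dispatch shape, using std::map and plain strings in place of CallDescriptionMap (illustrative only):

#include <functional>
#include <map>
#include <string>
#include <utility>

struct State { int Allocated = 0; };
using Handler = std::function<void(State &)>;
using Key = std::pair<std::string, unsigned>; // (callee name, arg count)

int main() {
  // Post-call handlers keyed like the checker's PostFnMap entries.
  std::map<Key, Handler> PostFnMap = {
      {{"malloc", 1}, [](State &S) { ++S.Allocated; }},
      {{"free", 1}, [](State &S) { --S.Allocated; }},
  };

  State S;
  if (auto It = PostFnMap.find({"malloc", 1}); It != PostFnMap.end())
    It->second(S); // dispatch on the matched description
  return S.Allocated == 1 ? 0 : 1;
}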
@@ -554,6 +579,17 @@ private: [[nodiscard]] ProgramStateRef MallocMemReturnsAttr(CheckerContext &C, const CallEvent &Call, const OwnershipAttr *Att, ProgramStateRef State) const; + /// Models memory allocation. + /// + /// \param [in] C Checker context. + /// \param [in] Call The expression that allocates memory. + /// \param [in] State The \c ProgramState right before allocation. + /// \param [in] isAlloca Is the allocation function alloca-like + /// \returns The ProgramState with returnValue bound + [[nodiscard]] ProgramStateRef MallocBindRetVal(CheckerContext &C, + const CallEvent &Call, + ProgramStateRef State, + bool isAlloca) const; /// Models memory allocation. /// @@ -904,16 +940,16 @@ protected: // A symbol from when the primary region should have been reallocated. SymbolRef FailedReallocSymbol; - // A C++ destructor stack frame in which memory was released. Used for + // A release function stack frame in which memory was released. Used for // miscellaneous false positive suppression. - const StackFrameContext *ReleaseDestructorLC; + const StackFrameContext *ReleaseFunctionLC; bool IsLeak; public: MallocBugVisitor(SymbolRef S, bool isLeak = false) : Sym(S), Mode(Normal), FailedReallocSymbol(nullptr), - ReleaseDestructorLC(nullptr), IsLeak(isLeak) {} + ReleaseFunctionLC(nullptr), IsLeak(isLeak) {} static void *getTag() { static int Tag = 0; @@ -1031,13 +1067,28 @@ public: }; } // end anonymous namespace -static bool isStandardNewDelete(const FunctionDecl *FD) { +static bool isStandardNew(const FunctionDecl *FD) { if (!FD) return false; OverloadedOperatorKind Kind = FD->getOverloadedOperator(); - if (Kind != OO_New && Kind != OO_Array_New && Kind != OO_Delete && - Kind != OO_Array_Delete) + if (Kind != OO_New && Kind != OO_Array_New) + return false; + + // This is standard if and only if it's not defined in a user file. + SourceLocation L = FD->getLocation(); + // If the header for operator delete is not included, it's still defined + // in an invalid source location. Check to make sure we don't crash. + return !L.isValid() || + FD->getASTContext().getSourceManager().isInSystemHeader(L); +} + +static bool isStandardDelete(const FunctionDecl *FD) { + if (!FD) + return false; + + OverloadedOperatorKind Kind = FD->getOverloadedOperator(); + if (Kind != OO_Delete && Kind != OO_Array_Delete) return false; // This is standard if and only if it's not defined in a user file. @@ -1052,6 +1103,12 @@ static bool isStandardNewDelete(const FunctionDecl *FD) { // Methods of MallocChecker and MallocBugVisitor. 
//===----------------------------------------------------------------------===// +bool MallocChecker::isFreeingOwnershipAttrCall(const CallEvent &Call) { + const auto *Func = dyn_cast_or_null<FunctionDecl>(Call.getDecl()); + + return Func && isFreeingOwnershipAttrCall(Func); +} + bool MallocChecker::isFreeingOwnershipAttrCall(const FunctionDecl *Func) { if (Func->hasAttrs()) { for (const auto *I : Func->specific_attrs<OwnershipAttr>()) { @@ -1067,15 +1124,27 @@ bool MallocChecker::isFreeingCall(const CallEvent &Call) const { if (FreeingMemFnMap.lookup(Call) || ReallocatingMemFnMap.lookup(Call)) return true; - if (const auto *Func = dyn_cast_or_null<FunctionDecl>(Call.getDecl())) - return isFreeingOwnershipAttrCall(Func); + return isFreeingOwnershipAttrCall(Call); +} + +bool MallocChecker::isAllocatingOwnershipAttrCall(const CallEvent &Call) { + const auto *Func = dyn_cast_or_null<FunctionDecl>(Call.getDecl()); + + return Func && isAllocatingOwnershipAttrCall(Func); +} + +bool MallocChecker::isAllocatingOwnershipAttrCall(const FunctionDecl *Func) { + for (const auto *I : Func->specific_attrs<OwnershipAttr>()) { + if (I->getOwnKind() == OwnershipAttr::Returns) + return true; + } return false; } bool MallocChecker::isMemCall(const CallEvent &Call) const { if (FreeingMemFnMap.lookup(Call) || AllocatingMemFnMap.lookup(Call) || - ReallocatingMemFnMap.lookup(Call)) + AllocaMemFnMap.lookup(Call) || ReallocatingMemFnMap.lookup(Call)) return true; if (!ShouldIncludeOwnershipAnnotatedFunctions) @@ -1182,18 +1251,18 @@ SVal MallocChecker::evalMulForBufferSize(CheckerContext &C, const Expr *Blocks, return TotalSize; } -void MallocChecker::checkBasicAlloc(const CallEvent &Call, +void MallocChecker::checkBasicAlloc(ProgramStateRef State, + const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); State = MallocMemAux(C, Call, Call.getArgExpr(0), UndefinedVal(), State, AllocationFamily(AF_Malloc)); - State = ProcessZeroAllocCheck(Call, 0, State); + State = ProcessZeroAllocCheck(C, Call, 0, State); C.addTransition(State); } -void MallocChecker::checkKernelMalloc(const CallEvent &Call, +void MallocChecker::checkKernelMalloc(ProgramStateRef State, + const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); std::optional<ProgramStateRef> MaybeState = performKernelMalloc(Call, C, State); if (MaybeState) @@ -1226,7 +1295,8 @@ static bool isGRealloc(const CallEvent &Call) { AC.UnsignedLongTy; } -void MallocChecker::checkRealloc(const CallEvent &Call, CheckerContext &C, +void MallocChecker::checkRealloc(ProgramStateRef State, const CallEvent &Call, + CheckerContext &C, bool ShouldFreeOnFail) const { // Ignore calls to functions whose type does not match the expected type of // either the standard realloc or g_realloc from GLib. 
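The ownership-attribute queries above reduce to scanning a function's ownership annotations for a particular kind. A standalone restatement, with Clang's attribute API stubbed out as a plain vector (illustrative):

#include <vector>

// Ownership kinds, mirroring OwnershipAttr's Returns/Takes/Holds.
enum class OwnKind { Returns, Takes, Holds };

// A call allocates if any annotation returns ownership...
bool isAllocating(const std::vector<OwnKind> &Attrs) {
  for (OwnKind K : Attrs)
    if (K == OwnKind::Returns)
      return true;
  return false;
}

// ...and frees if any annotation takes or holds it.
bool isFreeing(const std::vector<OwnKind> &Attrs) {
  for (OwnKind K : Attrs)
    if (K == OwnKind::Takes || K == OwnKind::Holds)
      return true;
  return false;
}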
@@ -1236,24 +1306,22 @@ void MallocChecker::checkRealloc(const CallEvent &Call, CheckerContext &C, if (!isStandardRealloc(Call) && !isGRealloc(Call)) return; - ProgramStateRef State = C.getState(); State = ReallocMemAux(C, Call, ShouldFreeOnFail, State, AllocationFamily(AF_Malloc)); - State = ProcessZeroAllocCheck(Call, 1, State); + State = ProcessZeroAllocCheck(C, Call, 1, State); C.addTransition(State); } -void MallocChecker::checkCalloc(const CallEvent &Call, +void MallocChecker::checkCalloc(ProgramStateRef State, const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); State = CallocMem(C, Call, State); - State = ProcessZeroAllocCheck(Call, 0, State); - State = ProcessZeroAllocCheck(Call, 1, State); + State = ProcessZeroAllocCheck(C, Call, 0, State); + State = ProcessZeroAllocCheck(C, Call, 1, State); C.addTransition(State); } -void MallocChecker::checkFree(const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); +void MallocChecker::checkFree(ProgramStateRef State, const CallEvent &Call, + CheckerContext &C) const { bool IsKnownToBeAllocatedMemory = false; if (suppressDeallocationsInSuspiciousContexts(Call, C)) return; @@ -1262,29 +1330,28 @@ void MallocChecker::checkFree(const CallEvent &Call, CheckerContext &C) const { C.addTransition(State); } -void MallocChecker::checkAlloca(const CallEvent &Call, +void MallocChecker::checkAlloca(ProgramStateRef State, const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); State = MallocMemAux(C, Call, Call.getArgExpr(0), UndefinedVal(), State, AllocationFamily(AF_Alloca)); - State = ProcessZeroAllocCheck(Call, 0, State); + State = ProcessZeroAllocCheck(C, Call, 0, State); C.addTransition(State); } -void MallocChecker::checkStrdup(const CallEvent &Call, +void MallocChecker::checkStrdup(ProgramStateRef State, const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr()); if (!CE) return; - State = MallocUpdateRefState(C, CE, State, AllocationFamily(AF_Malloc)); + State = MallocMemAux(C, Call, UnknownVal(), UnknownVal(), State, + AllocationFamily(AF_Malloc)); C.addTransition(State); } -void MallocChecker::checkIfNameIndex(const CallEvent &Call, +void MallocChecker::checkIfNameIndex(ProgramStateRef State, + const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); // Should we model this differently? We can allocate a fixed number of // elements with zeros in the last one. 
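The signature change running through these checker hunks is worth stating once: callbacks no longer fetch state via C.getState(); evalCall binds the call's return value first and threads the resulting immutable state down. The minimal shape of that pattern, with hypothetical types standing in for the static analyzer API:

#include <memory>

struct ProgramState { int Version = 0; };
using StateRef = std::shared_ptr<const ProgramState>;

// Stand-in for MallocBindRetVal: derive a new state from the old one.
StateRef bindRetVal(StateRef In) {
  auto Out = std::make_shared<ProgramState>(*In);
  Out->Version++; // models State->BindExpr(CE, LCtx, RetVal)
  return Out;
}

// Handlers receive the post-binding state instead of calling C.getState().
void checkBasicAlloc(StateRef State /*, const CallEvent &, CheckerContext & */) {
  (void)State; // would call MallocMemAux / ProcessZeroAllocCheck here
}

int main() {
  StateRef S = std::make_shared<ProgramState>();
  S = bindRetVal(S);
  checkBasicAlloc(S);
  return 0;
}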
State = MallocMemAux(C, Call, UnknownVal(), UnknownVal(), State, @@ -1293,18 +1360,18 @@ void MallocChecker::checkIfNameIndex(const CallEvent &Call, C.addTransition(State); } -void MallocChecker::checkIfFreeNameIndex(const CallEvent &Call, +void MallocChecker::checkIfFreeNameIndex(ProgramStateRef State, + const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); bool IsKnownToBeAllocatedMemory = false; State = FreeMemAux(C, Call, State, 0, false, IsKnownToBeAllocatedMemory, AllocationFamily(AF_IfNameIndex)); C.addTransition(State); } -void MallocChecker::checkCXXNewOrCXXDelete(const CallEvent &Call, +void MallocChecker::checkCXXNewOrCXXDelete(ProgramStateRef State, + const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); bool IsKnownToBeAllocatedMemory = false; const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr()); if (!CE) @@ -1321,12 +1388,12 @@ void MallocChecker::checkCXXNewOrCXXDelete(const CallEvent &Call, case OO_New: State = MallocMemAux(C, Call, CE->getArg(0), UndefinedVal(), State, AllocationFamily(AF_CXXNew)); - State = ProcessZeroAllocCheck(Call, 0, State); + State = ProcessZeroAllocCheck(C, Call, 0, State); break; case OO_Array_New: State = MallocMemAux(C, Call, CE->getArg(0), UndefinedVal(), State, AllocationFamily(AF_CXXNewArray)); - State = ProcessZeroAllocCheck(Call, 0, State); + State = ProcessZeroAllocCheck(C, Call, 0, State); break; case OO_Delete: State = FreeMemAux(C, Call, State, 0, false, IsKnownToBeAllocatedMemory, @@ -1344,48 +1411,44 @@ void MallocChecker::checkCXXNewOrCXXDelete(const CallEvent &Call, C.addTransition(State); } -void MallocChecker::checkGMalloc0(const CallEvent &Call, +void MallocChecker::checkGMalloc0(ProgramStateRef State, const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); SValBuilder &svalBuilder = C.getSValBuilder(); SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy); State = MallocMemAux(C, Call, Call.getArgExpr(0), zeroVal, State, AllocationFamily(AF_Malloc)); - State = ProcessZeroAllocCheck(Call, 0, State); + State = ProcessZeroAllocCheck(C, Call, 0, State); C.addTransition(State); } -void MallocChecker::checkGMemdup(const CallEvent &Call, +void MallocChecker::checkGMemdup(ProgramStateRef State, const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); State = MallocMemAux(C, Call, Call.getArgExpr(1), UnknownVal(), State, AllocationFamily(AF_Malloc)); - State = ProcessZeroAllocCheck(Call, 1, State); + State = ProcessZeroAllocCheck(C, Call, 1, State); C.addTransition(State); } -void MallocChecker::checkGMallocN(const CallEvent &Call, +void MallocChecker::checkGMallocN(ProgramStateRef State, const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); SVal Init = UndefinedVal(); SVal TotalSize = evalMulForBufferSize(C, Call.getArgExpr(0), Call.getArgExpr(1)); State = MallocMemAux(C, Call, TotalSize, Init, State, AllocationFamily(AF_Malloc)); - State = ProcessZeroAllocCheck(Call, 0, State); - State = ProcessZeroAllocCheck(Call, 1, State); + State = ProcessZeroAllocCheck(C, Call, 0, State); + State = ProcessZeroAllocCheck(C, Call, 1, State); C.addTransition(State); } -void MallocChecker::checkGMallocN0(const CallEvent &Call, +void MallocChecker::checkGMallocN0(ProgramStateRef State, const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); SValBuilder &SB = C.getSValBuilder(); SVal Init = 
SB.makeZeroVal(SB.getContext().CharTy); SVal TotalSize = evalMulForBufferSize(C, Call.getArgExpr(0), Call.getArgExpr(1)); State = MallocMemAux(C, Call, TotalSize, Init, State, AllocationFamily(AF_Malloc)); - State = ProcessZeroAllocCheck(Call, 0, State); - State = ProcessZeroAllocCheck(Call, 1, State); + State = ProcessZeroAllocCheck(C, Call, 0, State); + State = ProcessZeroAllocCheck(C, Call, 1, State); C.addTransition(State); } @@ -1395,14 +1458,13 @@ static bool isFromStdNamespace(const CallEvent &Call) { return FD->isInStdNamespace(); } -void MallocChecker::preGetdelim(const CallEvent &Call, +void MallocChecker::preGetdelim(ProgramStateRef State, const CallEvent &Call, CheckerContext &C) const { // Discard calls to the C++ standard library function std::getline(), which // is completely unrelated to the POSIX getline() that we're checking. if (isFromStdNamespace(Call)) return; - ProgramStateRef State = C.getState(); const auto LinePtr = getPointeeVal(Call.getArgSVal(0), State); if (!LinePtr) return; @@ -1419,22 +1481,19 @@ void MallocChecker::preGetdelim(const CallEvent &Call, C.addTransition(State); } -void MallocChecker::checkGetdelim(const CallEvent &Call, +void MallocChecker::checkGetdelim(ProgramStateRef State, const CallEvent &Call, CheckerContext &C) const { // Discard calls to the C++ standard library function std::getline(), which // is completely unrelated to the POSIX getline() that we're checking. if (isFromStdNamespace(Call)) return; - ProgramStateRef State = C.getState(); // Handle the post-conditions of getline and getdelim: // Register the new conjured value as an allocated buffer. const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr()); if (!CE) return; - SValBuilder &SVB = C.getSValBuilder(); - const auto LinePtr = getPointeeVal(Call.getArgSVal(0), State)->getAs<DefinedSVal>(); const auto Size = @@ -1442,25 +1501,24 @@ void MallocChecker::checkGetdelim(const CallEvent &Call, if (!LinePtr || !Size || !LinePtr->getAsRegion()) return; - State = setDynamicExtent(State, LinePtr->getAsRegion(), *Size, SVB); + State = setDynamicExtent(State, LinePtr->getAsRegion(), *Size); C.addTransition(MallocUpdateRefState(C, CE, State, AllocationFamily(AF_Malloc), *LinePtr)); } -void MallocChecker::checkReallocN(const CallEvent &Call, +void MallocChecker::checkReallocN(ProgramStateRef State, const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); State = ReallocMemAux(C, Call, /*ShouldFreeOnFail=*/false, State, AllocationFamily(AF_Malloc), /*SuffixWithN=*/true); - State = ProcessZeroAllocCheck(Call, 1, State); - State = ProcessZeroAllocCheck(Call, 2, State); + State = ProcessZeroAllocCheck(C, Call, 1, State); + State = ProcessZeroAllocCheck(C, Call, 2, State); C.addTransition(State); } -void MallocChecker::checkOwnershipAttr(const CallEvent &Call, +void MallocChecker::checkOwnershipAttr(ProgramStateRef State, + const CallEvent &Call, CheckerContext &C) const { - ProgramStateRef State = C.getState(); const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr()); if (!CE) return; @@ -1487,48 +1545,67 @@ void MallocChecker::checkOwnershipAttr(const CallEvent &Call, C.addTransition(State); } -void MallocChecker::checkPostCall(const CallEvent &Call, - CheckerContext &C) const { - if (C.wasInlined) - return; +bool MallocChecker::evalCall(const CallEvent &Call, CheckerContext &C) const { if (!Call.getOriginExpr()) - return; + return false; ProgramStateRef State = C.getState(); if (const CheckFn *Callback = FreeingMemFnMap.lookup(Call)) { - 
(*Callback)(this, Call, C); - return; + (*Callback)(this, State, Call, C); + return true; } if (const CheckFn *Callback = AllocatingMemFnMap.lookup(Call)) { - (*Callback)(this, Call, C); - return; + State = MallocBindRetVal(C, Call, State, false); + (*Callback)(this, State, Call, C); + return true; } if (const CheckFn *Callback = ReallocatingMemFnMap.lookup(Call)) { - (*Callback)(this, Call, C); - return; + State = MallocBindRetVal(C, Call, State, false); + (*Callback)(this, State, Call, C); + return true; } - if (isStandardNewDelete(Call)) { - checkCXXNewOrCXXDelete(Call, C); - return; + if (isStandardNew(Call)) { + State = MallocBindRetVal(C, Call, State, false); + checkCXXNewOrCXXDelete(State, Call, C); + return true; } - checkOwnershipAttr(Call, C); + if (isStandardDelete(Call)) { + checkCXXNewOrCXXDelete(State, Call, C); + return true; + } + + if (const CheckFn *Callback = AllocaMemFnMap.lookup(Call)) { + State = MallocBindRetVal(C, Call, State, true); + (*Callback)(this, State, Call, C); + return true; + } + + if (isFreeingOwnershipAttrCall(Call)) { + checkOwnershipAttr(State, Call, C); + return true; + } + + if (isAllocatingOwnershipAttrCall(Call)) { + State = MallocBindRetVal(C, Call, State, false); + checkOwnershipAttr(State, Call, C); + return true; + } + + return false; } // Performs a 0-sized allocations check. ProgramStateRef MallocChecker::ProcessZeroAllocCheck( - const CallEvent &Call, const unsigned IndexOfSizeArg, ProgramStateRef State, - std::optional<SVal> RetVal) { + CheckerContext &C, const CallEvent &Call, const unsigned IndexOfSizeArg, + ProgramStateRef State, std::optional<SVal> RetVal) { if (!State) return nullptr; - if (!RetVal) - RetVal = Call.getReturnValue(); - const Expr *Arg = nullptr; if (const CallExpr *CE = dyn_cast<CallExpr>(Call.getOriginExpr())) { @@ -1545,6 +1622,9 @@ ProgramStateRef MallocChecker::ProcessZeroAllocCheck( return nullptr; } + if (!RetVal) + RetVal = State->getSVal(Call.getOriginExpr(), C.getLocationContext()); + assert(Arg); auto DefArgVal = @@ -1656,7 +1736,7 @@ MallocChecker::processNewAllocation(const CXXAllocatorCall &Call, } State = MallocUpdateRefState(C, NE, State, Family, Target); - State = ProcessZeroAllocCheck(Call, 0, State, Target); + State = ProcessZeroAllocCheck(C, Call, 0, State, Target); return State; } @@ -1736,6 +1816,24 @@ MallocChecker::MallocMemReturnsAttr(CheckerContext &C, const CallEvent &Call, return MallocMemAux(C, Call, UnknownVal(), UndefinedVal(), State, Family); } +ProgramStateRef MallocChecker::MallocBindRetVal(CheckerContext &C, + const CallEvent &Call, + ProgramStateRef State, + bool isAlloca) const { + const Expr *CE = Call.getOriginExpr(); + + // We expect the allocation functions to return a pointer. + if (!Loc::isLocType(CE->getType())) + return nullptr; + + unsigned Count = C.blockCount(); + SValBuilder &SVB = C.getSValBuilder(); + const LocationContext *LCtx = C.getPredecessor()->getLocationContext(); + DefinedSVal RetVal = isAlloca ? SVB.getAllocaRegionVal(CE, LCtx, Count) + : SVB.getConjuredHeapSymbolVal(CE, LCtx, Count); + return State->BindExpr(CE, C.getLocationContext(), RetVal); +} + ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C, const CallEvent &Call, const Expr *SizeEx, SVal Init, @@ -1814,20 +1912,12 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C, const Expr *CE = Call.getOriginExpr(); // We expect the malloc functions to return a pointer. - if (!Loc::isLocType(CE->getType())) - return nullptr; + // Should have been already checked. 
+ assert(Loc::isLocType(CE->getType()) && + "Allocation functions must return a pointer"); - // Bind the return value to the symbolic value from the heap region. - // TODO: move use of this functions to an EvalCall callback, becasue - // BindExpr() should'nt be used elsewhere. - unsigned Count = C.blockCount(); - SValBuilder &SVB = C.getSValBuilder(); const LocationContext *LCtx = C.getPredecessor()->getLocationContext(); - DefinedSVal RetVal = ((Family.Kind == AF_Alloca) - ? SVB.getAllocaRegionVal(CE, LCtx, Count) - : SVB.getConjuredHeapSymbolVal(CE, LCtx, Count) - .castAs<DefinedSVal>()); - State = State->BindExpr(CE, C.getLocationContext(), RetVal); + SVal RetVal = State->getSVal(CE, C.getLocationContext()); // Fill the region with the initialization value. State = State->bindDefaultInitial(RetVal, Init, LCtx); @@ -1840,7 +1930,7 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C, // Set the region's extent. State = setDynamicExtent(State, RetVal.getAsRegion(), - Size.castAs<DefinedOrUnknownSVal>(), SVB); + Size.castAs<DefinedOrUnknownSVal>()); return MallocUpdateRefState(C, CE, State, Family); } @@ -1854,7 +1944,7 @@ static ProgramStateRef MallocUpdateRefState(CheckerContext &C, const Expr *E, // Get the return value. if (!RetVal) - RetVal = C.getSVal(E); + RetVal = State->getSVal(E, C.getLocationContext()); // We expect the malloc functions to return a pointer. if (!RetVal->getAs<Loc>()) @@ -1862,20 +1952,15 @@ static ProgramStateRef MallocUpdateRefState(CheckerContext &C, const Expr *E, SymbolRef Sym = RetVal->getAsLocSymbol(); - // This is a return value of a function that was not inlined, such as malloc() - // or new(). We've checked that in the caller. Therefore, it must be a symbol. - assert(Sym); - // FIXME: In theory this assertion should fail for `alloca()` calls (because - // `AllocaRegion`s are not symbolic); but in practice this does not happen. - // As the current code appears to work correctly, I'm not touching this issue - // now, but it would be good to investigate and clarify this. - // Also note that perhaps the special `AllocaRegion` should be replaced by - // `SymbolicRegion` (or turned into a subclass of `SymbolicRegion`) to enable - // proper tracking of memory allocated by `alloca()` -- and after that change - // this assertion would become valid again. + // NOTE: If this was an `alloca()` call, then `RetVal` holds an + // `AllocaRegion`, so `Sym` will be a null pointer because `AllocaRegion`s do + // not have an associated symbol. However, this distinct region type means + // that we don't need to store anything about them in `RegionState`. - // Set the symbol's state to Allocated. - return State->set<RegionState>(Sym, RefState::getAllocated(Family, E)); + if (Sym) + return State->set<RegionState>(Sym, RefState::getAllocated(Family, E)); + + return State; } ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C, @@ -2214,6 +2299,14 @@ MallocChecker::FreeMemAux(CheckerContext &C, const Expr *ArgExpr, // that. assert(!RsBase || (RsBase && RsBase->getAllocationFamily() == Family)); + // Assume that after memory is freed, it contains unknown values. This + // conforms to the language standards, since reading from freed memory is + // considered UB and may result in an arbitrary value. + State = State->invalidateRegions({location}, Call.getOriginExpr(), + C.blockCount(), C.getLocationContext(), + /*CausesPointerEscape=*/false, + /*InvalidatedSymbols=*/nullptr); + + // Normal free.
if (Hold) return State->set<RegionState>(SymBase, @@ -2781,6 +2874,7 @@ MallocChecker::ReallocMemAux(CheckerContext &C, const CallEvent &Call, return stateMalloc; } + // Process as an allocation of 0 bytes. if (PrtIsNull && SizeIsZero) return State; @@ -2815,7 +2909,7 @@ MallocChecker::ReallocMemAux(CheckerContext &C, const CallEvent &Call, // Get the from and to pointer symbols as in toPtr = realloc(fromPtr, size). SymbolRef FromPtr = arg0Val.getLocSymbolInBase(); - SVal RetVal = C.getSVal(CE); + SVal RetVal = stateRealloc->getSVal(CE, C.getLocationContext()); SymbolRef ToPtr = RetVal.getAsSymbol(); assert(FromPtr && ToPtr && "By this point, FreeMemAux and MallocMemAux should have checked " @@ -3014,6 +3108,14 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper, C.addTransition(state->set<RegionState>(RS), N); } +void MallocChecker::checkPostCall(const CallEvent &Call, + CheckerContext &C) const { + if (const auto *PostFN = PostFnMap.lookup(Call)) { + (*PostFN)(this, C.getState(), Call, C); + return; + } +} + void MallocChecker::checkPreCall(const CallEvent &Call, CheckerContext &C) const { @@ -3047,7 +3149,7 @@ void MallocChecker::checkPreCall(const CallEvent &Call, // We need to handle getline pre-conditions here before the pointed region // gets invalidated by StreamChecker if (const auto *PreFN = PreFnMap.lookup(Call)) { - (*PreFN)(this, Call, C); + (*PreFN)(this, C.getState(), Call, C); return; } @@ -3551,21 +3653,25 @@ PathDiagnosticPieceRef MallocBugVisitor::VisitNode(const ExplodedNode *N, const LocationContext *CurrentLC = N->getLocationContext(); - // If we find an atomic fetch_add or fetch_sub within the destructor in which - // the pointer was released (before the release), this is likely a destructor - // of a shared pointer. + // If we find an atomic fetch_add or fetch_sub within the function in which + // the pointer was released (before the release), this is likely a release + // point of a reference-counted object (like a shared pointer). + // // Because we don't model atomics, and also because we don't know that the // original reference count is positive, we should not report use-after-frees - // on objects deleted in such destructors. This can probably be improved + // on objects deleted in such functions. This can probably be improved // through better shared pointer modeling. - if (ReleaseDestructorLC && (ReleaseDestructorLC == CurrentLC || - ReleaseDestructorLC->isParentOf(CurrentLC))) { + if (ReleaseFunctionLC && (ReleaseFunctionLC == CurrentLC || + ReleaseFunctionLC->isParentOf(CurrentLC))) { if (const auto *AE = dyn_cast<AtomicExpr>(S)) { // Check for manual use of atomic builtins. AtomicExpr::AtomicOp Op = AE->getOp(); if (Op == AtomicExpr::AO__c11_atomic_fetch_add || Op == AtomicExpr::AO__c11_atomic_fetch_sub) { BR.markInvalid(getTag(), S); + // After the report is considered invalid there is no need to proceed + // further. + return nullptr; } } else if (const auto *CE = dyn_cast<CallExpr>(S)) { // Check for `std::atomic` and such. This covers both regular method calls @@ -3577,6 +3683,9 @@ PathDiagnosticPieceRef MallocBugVisitor::VisitNode(const ExplodedNode *N, // "__atomic_base" or something. if (StringRef(RD->getNameAsString()).contains("atomic")) { BR.markInvalid(getTag(), S); + // After the report is considered invalid there is no need to proceed + // further.
+ return nullptr; } } } @@ -3648,35 +3757,54 @@ PathDiagnosticPieceRef MallocBugVisitor::VisitNode(const ExplodedNode *N, return nullptr; } - // See if we're releasing memory while inlining a destructor - // (or one of its callees). This turns on various common - // false positive suppressions. - bool FoundAnyDestructor = false; - for (const LocationContext *LC = CurrentLC; LC; LC = LC->getParent()) { - if (const auto *DD = dyn_cast<CXXDestructorDecl>(LC->getDecl())) { - if (isReferenceCountingPointerDestructor(DD)) { - // This immediately looks like a reference-counting destructor. - // We're bad at guessing the original reference count of the object, - // so suppress the report for now. - BR.markInvalid(getTag(), DD); - } else if (!FoundAnyDestructor) { - assert(!ReleaseDestructorLC && - "There can be only one release point!"); - // Suspect that it's a reference counting pointer destructor. - // On one of the next nodes might find out that it has atomic - // reference counting operations within it (see the code above), - // and if so, we'd conclude that it likely is a reference counting - // pointer destructor. - ReleaseDestructorLC = LC->getStackFrame(); - // It is unlikely that releasing memory is delegated to a destructor - // inside a destructor of a shared pointer, because it's fairly hard - // to pass the information that the pointer indeed needs to be - // released into it. So we're only interested in the innermost - // destructor. - FoundAnyDestructor = true; + // Save the first destructor/function as the release point. + assert(!ReleaseFunctionLC && "There should be only one release point"); + ReleaseFunctionLC = CurrentLC->getStackFrame(); + + // See if we're releasing memory while inlining a destructor that + // decrements reference counters (or one of its callees). + // This turns on various common false positive suppressions. + for (const LocationContext *LC = CurrentLC; LC; LC = LC->getParent()) { + if (const auto *DD = dyn_cast<CXXDestructorDecl>(LC->getDecl())) { + if (isReferenceCountingPointerDestructor(DD)) { + // This immediately looks like a reference-counting destructor. + // We're bad at guessing the original reference count of the + // object, so suppress the report for now. + BR.markInvalid(getTag(), DD); + + // After the report is considered invalid there is no need to proceed + // further. + return nullptr; + } + + // Switch suspicion to the outer destructor to catch patterns like: + // (note that the class name is distorted to bypass + // isReferenceCountingPointerDestructor() logic) + // + // SmartPointr::~SmartPointr() { + // if (refcount.fetch_sub(1) == 1) + // release_resources(); + // } + // void SmartPointr::release_resources() { + // free(buffer); + // } + // + // This way ReleaseFunctionLC will point to the outermost destructor and + // it will be possible to catch a wider range of false positives.
+ // + // NOTE: it would be great to support something like that in C, since + // currently patterns like the following won't be suppressed: + // + // void doFree(struct Data *data) { free(data); } + // void putData(struct Data *data) + // { + // if (refPut(data)) + // doFree(data); + // } + ReleaseFunctionLC = LC->getStackFrame(); } } - } + } else if (isRelinquished(RSCurr, RSPrev, S)) { Msg = "Memory ownership is transferred"; StackHint = std::make_unique<StackHintGeneratorForSymbol>(Sym, ""); diff --git a/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp index 5240352..52416e2 100644 --- a/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp @@ -12,6 +12,7 @@ // //===----------------------------------------------------------------------===// +#include "Move.h" #include "clang/AST/Attr.h" #include "clang/AST/ExprCXX.h" #include "clang/Driver/DriverDiagnostic.h" diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp index f73c900..456132e 100644 --- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp @@ -635,8 +635,9 @@ public: }; } // namespace -Bindings getAllVarBindingsForSymbol(ProgramStateManager &Manager, - const ExplodedNode *Node, SymbolRef Sym) { +static Bindings getAllVarBindingsForSymbol(ProgramStateManager &Manager, + const ExplodedNode *Node, + SymbolRef Sym) { Bindings Result; VarBindingsCollector Collector{Sym, Result}; while (Result.empty() && Node) { diff --git a/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp index 505020d..321388a 100644 --- a/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp @@ -241,7 +241,7 @@ bool SmartPtrModeling::isBoolConversionMethod(const CallEvent &Call) const { constexpr llvm::StringLiteral BASIC_OSTREAM_NAMES[] = {"basic_ostream"}; -bool isStdBasicOstream(const Expr *E) { +static bool isStdBasicOstream(const Expr *E) { const auto *RD = E->getType()->getAsCXXRecordDecl(); return hasStdClassWithName(RD, BASIC_OSTREAM_NAMES); } @@ -250,7 +250,7 @@ static bool isStdFunctionCall(const CallEvent &Call) { return Call.getDecl() && Call.getDecl()->getDeclContext()->isStdNamespace(); } -bool isStdOstreamOperatorCall(const CallEvent &Call) { +static bool isStdOstreamOperatorCall(const CallEvent &Call) { if (Call.getNumArgs() != 2 || !isStdFunctionCall(Call)) return false; const auto *FC = dyn_cast<SimpleFunctionCall>(&Call); diff --git a/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp index 5394c22..d8c5294 100644 --- a/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp @@ -305,7 +305,7 @@ static const MemSpaceRegion *getStackOrGlobalSpaceRegion(const MemRegion *R) { return nullptr; } -const MemRegion *getOriginBaseRegion(const MemRegion *Reg) { +static const MemRegion *getOriginBaseRegion(const MemRegion *Reg) { Reg = Reg->getBaseRegion(); while (const auto *SymReg = dyn_cast<SymbolicRegion>(Reg)) { const auto *OriginReg = SymReg->getSymbol()->getOriginRegion(); @@ -316,7 +316,7 @@ const MemRegion *getOriginBaseRegion(const MemRegion *Reg) { return
Reg; } -std::optional<std::string> printReferrer(const MemRegion *Referrer) { +static std::optional<std::string> printReferrer(const MemRegion *Referrer) { assert(Referrer); const StringRef ReferrerMemorySpace = [](const MemSpaceRegion *Space) { if (isa<StaticGlobalSpaceRegion>(Space)) @@ -354,7 +354,7 @@ std::optional<std::string> printReferrer(const MemRegion *Referrer) { /// Check whether \p Region refers to a freshly minted symbol after an opaque /// function call. -bool isInvalidatedSymbolRegion(const MemRegion *Region) { +static bool isInvalidatedSymbolRegion(const MemRegion *Region) { const auto *SymReg = Region->getAs<SymbolicRegion>(); if (!SymReg) return false; diff --git a/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp index 1987796..a026735 100644 --- a/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp @@ -31,7 +31,7 @@ REGISTER_MAP_WITH_PROGRAMSTATE(VariantHeldTypeMap, const MemRegion *, QualType) namespace clang::ento::tagged_union_modeling { -const CXXConstructorDecl * +static const CXXConstructorDecl * getConstructorDeclarationForCall(const CallEvent &Call) { const auto *ConstructorCall = dyn_cast<CXXConstructorCall>(&Call); if (!ConstructorCall) @@ -76,7 +76,7 @@ bool isMoveAssignmentCall(const CallEvent &Call) { return AsMethodDecl->isMoveAssignmentOperator(); } -bool isStdType(const Type *Type, llvm::StringRef TypeName) { +static bool isStdType(const Type *Type, llvm::StringRef TypeName) { auto *Decl = Type->getAsRecordDecl(); if (!Decl) return false; diff --git a/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp index 87d255e..8d17ba5 100644 --- a/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp @@ -266,7 +266,6 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const { return; ASTContext &Ctx = C.getASTContext(); - SValBuilder &SVB = C.getSValBuilder(); ProgramStateRef State = C.getState(); QualType TypeToCheck; @@ -301,7 +300,7 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const { if (VD) { State = setDynamicExtent(State, State->getRegion(VD, C.getLocationContext()), - ArraySize.castAs<NonLoc>(), SVB); + ArraySize.castAs<NonLoc>()); } // Remember our assumptions! 
diff --git a/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp index 5c10e75..b0563b6 100644 --- a/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp +++ b/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp @@ -173,7 +173,7 @@ const PointerToMemberData *BasicValueFactory::getPointerToMemberData( return D; } -LLVM_ATTRIBUTE_UNUSED bool hasNoRepeatedElements( +LLVM_ATTRIBUTE_UNUSED static bool hasNoRepeatedElements( llvm::ImmutableList<const CXXBaseSpecifier *> BaseSpecList) { llvm::SmallPtrSet<QualType, 16> BaseSpecSeen; for (const CXXBaseSpecifier *BaseSpec : BaseSpecList) { diff --git a/clang/lib/StaticAnalyzer/Core/DynamicExtent.cpp b/clang/lib/StaticAnalyzer/Core/DynamicExtent.cpp index 6cf0641..f0c6501 100644 --- a/clang/lib/StaticAnalyzer/Core/DynamicExtent.cpp +++ b/clang/lib/StaticAnalyzer/Core/DynamicExtent.cpp @@ -120,7 +120,7 @@ DefinedOrUnknownSVal getDynamicElementCountWithOffset(ProgramStateRef State, } ProgramStateRef setDynamicExtent(ProgramStateRef State, const MemRegion *MR, - DefinedOrUnknownSVal Size, SValBuilder &SVB) { + DefinedOrUnknownSVal Size) { MR = MR->StripCasts(); if (Size.isUnknown()) diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp index 9d3e4fc..2ca24d0 100644 --- a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp +++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp @@ -817,8 +817,7 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call, if (Size.isUndef()) Size = UnknownVal(); - State = setDynamicExtent(State, MR, Size.castAs<DefinedOrUnknownSVal>(), - svalBuilder); + State = setDynamicExtent(State, MR, Size.castAs<DefinedOrUnknownSVal>()); } else { R = svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count); } diff --git a/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp index 28d9de2..5c0df88 100644 --- a/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp +++ b/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp @@ -1211,18 +1211,19 @@ const arrowIndices = )<<<"; OS.str()); } -std::string getSpanBeginForControl(const char *ClassName, unsigned Index) { +static std::string getSpanBeginForControl(const char *ClassName, + unsigned Index) { std::string Result; llvm::raw_string_ostream OS(Result); OS << "<span id=\"" << ClassName << Index << "\">"; return Result; } -std::string getSpanBeginForControlStart(unsigned Index) { +static std::string getSpanBeginForControlStart(unsigned Index) { return getSpanBeginForControl("start", Index); } -std::string getSpanBeginForControlEnd(unsigned Index) { +static std::string getSpanBeginForControlEnd(unsigned Index) { return getSpanBeginForControl("end", Index); } diff --git a/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp b/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp index 7042f1a..96f5d7c 100644 --- a/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp +++ b/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp @@ -265,8 +265,8 @@ static bool isPossiblyEscaped(ExplodedNode *N, const DeclRefExpr *DR) { llvm_unreachable("Reached root without finding the declaration of VD"); } -bool shouldCompletelyUnroll(const Stmt *LoopStmt, ASTContext &ASTCtx, - ExplodedNode *Pred, unsigned &maxStep) { +static bool shouldCompletelyUnroll(const Stmt *LoopStmt, ASTContext &ASTCtx, + ExplodedNode *Pred, unsigned &maxStep) { if (!isLoopStmt(LoopStmt)) return false; @@ -297,7 +297,7 @@ bool shouldCompletelyUnroll(const 
Stmt *LoopStmt, ASTContext &ASTCtx, return !isPossiblyEscaped(Pred, CounterVarRef); } -bool madeNewBranch(ExplodedNode *N, const Stmt *LoopStmt) { +static bool madeNewBranch(ExplodedNode *N, const Stmt *LoopStmt) { const Stmt *S = nullptr; while (!N->pred_empty()) { if (N->succ_size() > 1) diff --git a/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp index fab8e35..70d5a60 100644 --- a/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp +++ b/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp @@ -158,7 +158,7 @@ RangeSet RangeSet::Factory::unite(RangeSet Original, llvm::APSInt From, } template <typename T> -void swapIterators(T &First, T &FirstEnd, T &Second, T &SecondEnd) { +static void swapIterators(T &First, T &FirstEnd, T &Second, T &SecondEnd) { std::swap(First, Second); std::swap(FirstEnd, SecondEnd); } @@ -2624,7 +2624,7 @@ EquivalenceClass::removeMember(ProgramStateRef State, const SymbolRef Old) { } // Re-evaluate an SVal with top-level `State->assume` logic. -[[nodiscard]] ProgramStateRef +[[nodiscard]] static ProgramStateRef reAssume(ProgramStateRef State, const RangeSet *Constraint, SVal TheValue) { if (!Constraint) return State; diff --git a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp index eb9cde5c..7eca057 100644 --- a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp +++ b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp @@ -210,22 +210,24 @@ DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const Stmt *stmt, return nonloc::SymbolVal(sym); } -DefinedOrUnknownSVal -SValBuilder::getConjuredHeapSymbolVal(const Expr *E, - const LocationContext *LCtx, - unsigned VisitCount) { +DefinedSVal SValBuilder::getConjuredHeapSymbolVal(const Expr *E, + const LocationContext *LCtx, + unsigned VisitCount) { QualType T = E->getType(); return getConjuredHeapSymbolVal(E, LCtx, T, VisitCount); } -DefinedOrUnknownSVal -SValBuilder::getConjuredHeapSymbolVal(const Expr *E, - const LocationContext *LCtx, - QualType type, unsigned VisitCount) { +DefinedSVal SValBuilder::getConjuredHeapSymbolVal(const Expr *E, + const LocationContext *LCtx, + QualType type, + unsigned VisitCount) { assert(Loc::isLocType(type)); assert(SymbolManager::canSymbolicate(type)); - if (type->isNullPtrType()) - return makeZeroVal(type); + if (type->isNullPtrType()) { + // makeZeroVal() returns UnknownVal only for floating-point types, which + // is not the case here. + return makeZeroVal(type).castAs<DefinedSVal>(); + } SymbolRef sym = SymMgr.conjureSymbol(E, LCtx, type, VisitCount); return loc::MemRegionVal(MemMgr.getSymbolicHeapRegion(sym)); diff --git a/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp index 71268af..7cdd545 100644 --- a/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp +++ b/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp @@ -91,7 +91,6 @@ public: ?
" [" + PD->getCheckerName() + "]" : "") .str(); - reportPiece(WarnID, PD->getLocation().asLocation(), (PD->getShortDescription() + WarningMsg).str(), PD->path.back()->getRanges(), PD->path.back()->getFixits()); diff --git a/clang/test/AST/ByteCode/invalid.cpp b/clang/test/AST/ByteCode/invalid.cpp index 13ba84b..2a6c2d1 100644 --- a/clang/test/AST/ByteCode/invalid.cpp +++ b/clang/test/AST/ByteCode/invalid.cpp @@ -54,4 +54,7 @@ namespace Casts { B b; (void)*reinterpret_cast<void*>(&b); // both-error {{indirection not permitted on operand of type 'void *'}} } + + /// Just make sure this doesn't crash. + float PR9558 = reinterpret_cast<const float&>("asd"); } diff --git a/clang/test/Analysis/NewDelete-atomics.cpp b/clang/test/Analysis/NewDelete-atomics.cpp index 1425aca..cb0d5aa 100644 --- a/clang/test/Analysis/NewDelete-atomics.cpp +++ b/clang/test/Analysis/NewDelete-atomics.cpp @@ -97,6 +97,32 @@ public: T *getPtr() const { return Ptr; } // no-warning }; +// Also IntrusivePtr with different name and outline release in destructor +template <typename T> +class DifferentlyNamedOutlineRelease { + T *Ptr; + +public: + DifferentlyNamedOutlineRelease(T *Ptr) : Ptr(Ptr) { + Ptr->incRef(); + } + + DifferentlyNamedOutlineRelease(const DifferentlyNamedOutlineRelease &Other) : Ptr(Other.Ptr) { + Ptr->incRef(); + } + + void releasePtr(void) { + delete Ptr; + } + + ~DifferentlyNamedOutlineRelease() { + if (Ptr->decRef() == 1) + releasePtr(); + } + + T *getPtr() const { return Ptr; } // no-warning +}; + void testDestroyLocalRefPtr() { IntrusivePtr<RawObj> p1(new RawObj()); { @@ -176,3 +202,23 @@ void testDestroyLocalRefPtrWithAtomicsDifferentlyNamed( // p1 still maintains ownership. The object is not deleted. p1.getPtr()->foo(); // no-warning } + +void testDestroyLocalRefPtrWithOutlineRelease() { + DifferentlyNamedOutlineRelease <RawObj> p1(new RawObj()); + { + DifferentlyNamedOutlineRelease <RawObj> p2(p1); + } + + // p1 still maintains ownership. The object is not deleted. + p1.getPtr()->foo(); // no-warning +} + +void testDestroySymbolicRefPtrWithOutlineRelease( + const DifferentlyNamedOutlineRelease<RawObj> &p1) { + { + DifferentlyNamedOutlineRelease <RawObj> p2(p1); + } + + // p1 still maintains ownership. The object is not deleted. 
+ p1.getPtr()->foo(); // no-warning +} diff --git a/clang/test/Analysis/NewDelete-checker-test.cpp b/clang/test/Analysis/NewDelete-checker-test.cpp index 1100b49e..21b4cf8 100644 --- a/clang/test/Analysis/NewDelete-checker-test.cpp +++ b/clang/test/Analysis/NewDelete-checker-test.cpp @@ -37,10 +37,6 @@ extern "C" void *malloc(size_t); extern "C" void free (void* ptr); int *global; -//------------------ -// check for leaks -//------------------ - //----- Standard non-placement operators void testGlobalOpNew() { void *p = operator new(0); @@ -67,19 +63,6 @@ void testGlobalNoThrowPlacementExprNewBeforeOverload() { int *p = new(std::nothrow) int; } // leak-warning{{Potential leak of memory pointed to by 'p'}} -//----- Standard pointer placement operators -void testGlobalPointerPlacementNew() { - int i; - - void *p1 = operator new(0, &i); // no warn - - void *p2 = operator new[](0, &i); // no warn - - int *p3 = new(&i) int; // no warn - - int *p4 = new(&i) int[0]; // no warn -} - //----- Other cases void testNewMemoryIsInHeap() { int *p = new int; diff --git a/clang/test/Analysis/NewDelete-intersections.mm b/clang/test/Analysis/NewDelete-intersections.mm index 9ac4716..e897f48 100644 --- a/clang/test/Analysis/NewDelete-intersections.mm +++ b/clang/test/Analysis/NewDelete-intersections.mm @@ -3,13 +3,13 @@ // RUN: -analyzer-checker=core \ // RUN: -analyzer-checker=cplusplus.NewDelete +// leak-no-diagnostics + // RUN: %clang_analyze_cc1 -std=c++11 -DLEAKS -fblocks %s \ // RUN: -verify=leak \ // RUN: -analyzer-checker=core \ // RUN: -analyzer-checker=cplusplus.NewDeleteLeaks -// leak-no-diagnostics - // RUN: %clang_analyze_cc1 -std=c++11 -DLEAKS -fblocks %s \ // RUN: -verify=mismatch \ // RUN: -analyzer-checker=core \ diff --git a/clang/test/Analysis/malloc-interprocedural.c b/clang/test/Analysis/malloc-interprocedural.c index ae7a462..5e5232a 100644 --- a/clang/test/Analysis/malloc-interprocedural.c +++ b/clang/test/Analysis/malloc-interprocedural.c @@ -98,38 +98,3 @@ int uafAndCallsFooWithEmptyReturn(void) { fooWithEmptyReturn(12); return *x; // expected-warning {{Use of memory after it is freed}} } - - -// If we inline any of the malloc-family functions, the checker shouldn't also -// try to do additional modeling. -char *strndup(const char *str, size_t n) { - if (!str) - return 0; - - // DO NOT FIX. This is to test that we are actually using the inlined - // behavior! 
- if (n < 5) - return 0; - - size_t length = strlen(str); - if (length < n) - n = length; - - char *result = malloc(n + 1); - memcpy(result, str, n); - result[n] = '\0'; - return result; -} - -void useStrndup(size_t n) { - if (n == 0) { - (void)strndup(0, 20); // no-warning - return; - } else if (n < 5) { - (void)strndup("hi there", n); // no-warning - return; - } else { - (void)strndup("hi there", n); - return; // expected-warning{{leak}} - } -} diff --git a/clang/test/Analysis/malloc-refcounted.c b/clang/test/Analysis/malloc-refcounted.c new file mode 100644 index 0000000..bfbe91d --- /dev/null +++ b/clang/test/Analysis/malloc-refcounted.c @@ -0,0 +1,80 @@ +// RUN: %clang_analyze_cc1 -analyzer-checker=core,unix.Malloc -verify %s +// + +typedef __SIZE_TYPE__ size_t; + +typedef enum memory_order { + memory_order_relaxed = __ATOMIC_RELAXED, +} memory_order; + +void *calloc(size_t, size_t); +void free(void *); + +struct SomeData { + int i; + _Atomic int ref; +}; + +static struct SomeData *alloc_data(void) +{ + struct SomeData *data = calloc(sizeof(*data), 1); + + __c11_atomic_store(&data->ref, 2, memory_order_relaxed); + return data; +} + +static void put_data(struct SomeData *data) +{ + if (__c11_atomic_fetch_sub(&data->ref, 1, memory_order_relaxed) == 1) + free(data); +} + +static int dec_refcounter(struct SomeData *data) +{ + return __c11_atomic_fetch_sub(&data->ref, 1, memory_order_relaxed) == 1; +} + +static void put_data_nested(struct SomeData *data) +{ + if (dec_refcounter(data)) + free(data); +} + +static void put_data_uncond(struct SomeData *data) +{ + free(data); +} + +static void put_data_unrelated_atomic(struct SomeData *data) +{ + free(data); + __c11_atomic_fetch_sub(&data->ref, 1, memory_order_relaxed); +} + +void test_no_uaf(void) +{ + struct SomeData *data = alloc_data(); + put_data(data); + data->i += 1; // no warning +} + +void test_no_uaf_nested(void) +{ + struct SomeData *data = alloc_data(); + put_data_nested(data); + data->i += 1; // no warning +} + +void test_uaf(void) +{ + struct SomeData *data = alloc_data(); + put_data_uncond(data); + data->i += 1; // expected-warning{{Use of memory after it is freed}} +} + +void test_no_uaf_atomic_after(void) +{ + struct SomeData *data = alloc_data(); + put_data_unrelated_atomic(data); + data->i += 1; // expected-warning{{Use of memory after it is freed}} +} diff --git a/clang/test/CXX/module/module.import/p2.cpp b/clang/test/CXX/module/module.import/p2.cpp index ef60068..6b8e32f 100644 --- a/clang/test/CXX/module/module.import/p2.cpp +++ b/clang/test/CXX/module/module.import/p2.cpp @@ -30,9 +30,8 @@ void test() { } //--- UseInPartA.cppm -// expected-no-diagnostics export module M:partA; -import :impl; +import :impl; // expected-warning {{importing an implementation partition unit in a module interface is not recommended.}} void test() { A a; } diff --git a/clang/test/CodeGen/arm_acle.c b/clang/test/CodeGen/arm_acle.c index 1c41f1b..74de824 100644 --- a/clang/test/CodeGen/arm_acle.c +++ b/clang/test/CodeGen/arm_acle.c @@ -1,4 +1,5 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// RUN: %clang_cc1 -ffreestanding -triple armv8a-none-eabi -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s -check-prefixes=ARM,AArch32 // RUN: %clang_cc1 -ffreestanding -triple armv8a-none-eabi -target-feature +crc -target-feature +dsp -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s -check-prefixes=ARM,AArch32 // RUN: %clang_cc1 -ffreestanding 
-Wno-error=implicit-function-declaration -triple aarch64-none-elf -target-feature +neon -target-feature +crc -target-feature +crypto -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s -check-prefixes=ARM,AArch64 // RUN: %clang_cc1 -ffreestanding -triple aarch64-none-elf -target-feature +v8.3a -target-feature +crc -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s -check-prefixes=ARM,AArch64,AArch6483 @@ -638,12 +639,15 @@ uint32_t test_usat(int32_t t) { #endif /* 9.4.2 Saturating addition and subtraction intrinsics */ -#ifdef __ARM_FEATURE_DSP +#ifdef __ARM_32BIT_STATE // AArch32-LABEL: @test_qadd( // AArch32-NEXT: entry: // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.qadd(i32 [[A:%.*]], i32 [[B:%.*]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t test_qadd(int32_t a, int32_t b) { return __qadd(a, b); } @@ -653,6 +657,9 @@ int32_t test_qadd(int32_t a, int32_t b) { // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.qsub(i32 [[A:%.*]], i32 [[B:%.*]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t test_qsub(int32_t a, int32_t b) { return __qsub(a, b); } @@ -664,6 +671,9 @@ extern int32_t f(); // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.qadd(i32 [[CALL]], i32 [[CALL]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t test_qdbl() { return __qdbl(f()); } @@ -672,12 +682,15 @@ int32_t test_qdbl() { /* * 9.3 16-bit multiplications */ -#if __ARM_FEATURE_DSP +#ifdef __ARM_32BIT_STATE // AArch32-LABEL: @test_smulbb( // AArch32-NEXT: entry: // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smulbb(i32 [[A:%.*]], i32 [[B:%.*]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t test_smulbb(int32_t a, int32_t b) { return __smulbb(a, b); } @@ -687,6 +700,9 @@ int32_t test_smulbb(int32_t a, int32_t b) { // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smulbt(i32 [[A:%.*]], i32 [[B:%.*]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t test_smulbt(int32_t a, int32_t b) { return __smulbt(a, b); } @@ -696,6 +712,9 @@ int32_t test_smulbt(int32_t a, int32_t b) { // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smultb(i32 [[A:%.*]], i32 [[B:%.*]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t test_smultb(int32_t a, int32_t b) { return __smultb(a, b); } @@ -705,6 +724,9 @@ int32_t test_smultb(int32_t a, int32_t b) { // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smultt(i32 [[A:%.*]], i32 [[B:%.*]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t test_smultt(int32_t a, int32_t b) { return __smultt(a, b); } @@ -714,6 +736,9 @@ int32_t test_smultt(int32_t a, int32_t b) { // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smulwb(i32 [[A:%.*]], i32 [[B:%.*]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t test_smulwb(int32_t a, int32_t b) { return __smulwb(a, b); } @@ -723,18 +748,24 @@ int32_t test_smulwb(int32_t a, int32_t b) { // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smulwt(i32 [[A:%.*]], i32 [[B:%.*]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t 
test_smulwt(int32_t a, int32_t b) { return __smulwt(a, b); } #endif /* 9.4.3 Accumultating multiplications */ -#if __ARM_FEATURE_DSP +#ifdef __ARM_32BIT_STATE // AArch32-LABEL: @test_smlabb( // AArch32-NEXT: entry: // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlabb(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t test_smlabb(int32_t a, int32_t b, int32_t c) { return __smlabb(a, b, c); } @@ -744,6 +775,9 @@ int32_t test_smlabb(int32_t a, int32_t b, int32_t c) { // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlabt(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t test_smlabt(int32_t a, int32_t b, int32_t c) { return __smlabt(a, b, c); } @@ -753,6 +787,9 @@ int32_t test_smlabt(int32_t a, int32_t b, int32_t c) { // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlatb(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t test_smlatb(int32_t a, int32_t b, int32_t c) { return __smlatb(a, b, c); } @@ -762,6 +799,9 @@ int32_t test_smlatb(int32_t a, int32_t b, int32_t c) { // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlatt(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t test_smlatt(int32_t a, int32_t b, int32_t c) { return __smlatt(a, b, c); } @@ -771,6 +811,9 @@ int32_t test_smlatt(int32_t a, int32_t b, int32_t c) { // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlawb(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t test_smlawb(int32_t a, int32_t b, int32_t c) { return __smlawb(a, b, c); } @@ -780,6 +823,9 @@ int32_t test_smlawb(int32_t a, int32_t b, int32_t c) { // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlawt(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) // AArch32-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_DSP +__attribute__((target("dsp"))) +#endif int32_t test_smlawt(int32_t a, int32_t b, int32_t c) { return __smlawt(a, b, c); } @@ -1335,6 +1381,9 @@ int32_t test_smusdx(int16x2_t a, int16x2_t b) { // AArch64-NEXT: [[TMP1:%.*]] = call i32 @llvm.aarch64.crc32b(i32 [[A:%.*]], i32 [[TMP0]]) // AArch64-NEXT: ret i32 [[TMP1]] // +#ifndef __ARM_FEATURE_CRC32 +__attribute__((target("crc"))) +#endif uint32_t test_crc32b(uint32_t a, uint8_t b) { return __crc32b(a, b); } @@ -1351,6 +1400,9 @@ uint32_t test_crc32b(uint32_t a, uint8_t b) { // AArch64-NEXT: [[TMP1:%.*]] = call i32 @llvm.aarch64.crc32h(i32 [[A:%.*]], i32 [[TMP0]]) // AArch64-NEXT: ret i32 [[TMP1]] // +#ifndef __ARM_FEATURE_CRC32 +__attribute__((target("crc"))) +#endif uint32_t test_crc32h(uint32_t a, uint16_t b) { return __crc32h(a, b); } @@ -1365,6 +1417,9 @@ uint32_t test_crc32h(uint32_t a, uint16_t b) { // AArch64-NEXT: [[TMP0:%.*]] = call i32 @llvm.aarch64.crc32w(i32 [[A:%.*]], i32 [[B:%.*]]) // AArch64-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_CRC32 +__attribute__((target("crc"))) +#endif uint32_t test_crc32w(uint32_t a, uint32_t b) { return __crc32w(a, b); } @@ -1383,6 +1438,9 @@ uint32_t test_crc32w(uint32_t a, uint32_t b) { // AArch64-NEXT: [[TMP0:%.*]] = call i32 @llvm.aarch64.crc32x(i32 [[A:%.*]], i64 [[B:%.*]]) // AArch64-NEXT: ret i32 [[TMP0]] // +#ifndef 
__ARM_FEATURE_CRC32 +__attribute__((target("crc"))) +#endif uint32_t test_crc32d(uint32_t a, uint64_t b) { return __crc32d(a, b); } @@ -1399,6 +1457,9 @@ uint32_t test_crc32d(uint32_t a, uint64_t b) { // AArch64-NEXT: [[TMP1:%.*]] = call i32 @llvm.aarch64.crc32cb(i32 [[A:%.*]], i32 [[TMP0]]) // AArch64-NEXT: ret i32 [[TMP1]] // +#ifndef __ARM_FEATURE_CRC32 +__attribute__((target("crc"))) +#endif uint32_t test_crc32cb(uint32_t a, uint8_t b) { return __crc32cb(a, b); } @@ -1415,6 +1476,9 @@ uint32_t test_crc32cb(uint32_t a, uint8_t b) { // AArch64-NEXT: [[TMP1:%.*]] = call i32 @llvm.aarch64.crc32ch(i32 [[A:%.*]], i32 [[TMP0]]) // AArch64-NEXT: ret i32 [[TMP1]] // +#ifndef __ARM_FEATURE_CRC32 +__attribute__((target("crc"))) +#endif uint32_t test_crc32ch(uint32_t a, uint16_t b) { return __crc32ch(a, b); } @@ -1429,6 +1493,9 @@ uint32_t test_crc32ch(uint32_t a, uint16_t b) { // AArch64-NEXT: [[TMP0:%.*]] = call i32 @llvm.aarch64.crc32cw(i32 [[A:%.*]], i32 [[B:%.*]]) // AArch64-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_CRC32 +__attribute__((target("crc"))) +#endif uint32_t test_crc32cw(uint32_t a, uint32_t b) { return __crc32cw(a, b); } @@ -1447,6 +1514,9 @@ uint32_t test_crc32cw(uint32_t a, uint32_t b) { // AArch64-NEXT: [[TMP0:%.*]] = call i32 @llvm.aarch64.crc32cx(i32 [[A:%.*]], i64 [[B:%.*]]) // AArch64-NEXT: ret i32 [[TMP0]] // +#ifndef __ARM_FEATURE_CRC32 +__attribute__((target("crc"))) +#endif uint32_t test_crc32cd(uint32_t a, uint64_t b) { return __crc32cd(a, b); } diff --git a/clang/test/Driver/codegen-data.c b/clang/test/Driver/codegen-data.c new file mode 100644 index 0000000..28638f6 --- /dev/null +++ b/clang/test/Driver/codegen-data.c @@ -0,0 +1,38 @@ +// Verify that only one codegen-data flag is passed. +// RUN: not %clang -### -S --target=aarch64-linux-gnu -fcodegen-data-generate -fcodegen-data-use %s 2>&1 | FileCheck %s --check-prefix=CONFLICT +// RUN: not %clang -### -S --target=arm64-apple-darwin -fcodegen-data-generate -fcodegen-data-use %s 2>&1 | FileCheck %s --check-prefix=CONFLICT +// CONFLICT: error: invalid argument '-fcodegen-data-generate' not allowed with '-fcodegen-data-use' + +// Verify the codegen-data-generate (boolean) flag is passed to LLVM +// RUN: %clang -### -S --target=aarch64-linux-gnu -fcodegen-data-generate %s 2>&1| FileCheck %s --check-prefix=GENERATE +// RUN: %clang -### -S --target=arm64-apple-darwin -fcodegen-data-generate %s 2>&1| FileCheck %s --check-prefix=GENERATE +// GENERATE: "-mllvm" "-codegen-data-generate" + +// Verify the codegen-data-use-path flag (with a default value) is passed to LLVM. +// RUN: %clang -### -S --target=aarch64-linux-gnu -fcodegen-data-use %s 2>&1| FileCheck %s --check-prefix=USE +// RUN: %clang -### -S --target=arm64-apple-darwin -fcodegen-data-use %s 2>&1| FileCheck %s --check-prefix=USE +// RUN: %clang -### -S --target=aarch64-linux-gnu -fcodegen-data-use=file %s 2>&1 | FileCheck %s --check-prefix=USE-FILE +// RUN: %clang -### -S --target=arm64-apple-darwin -fcodegen-data-use=file %s 2>&1 | FileCheck %s --check-prefix=USE-FILE +// USE: "-mllvm" "-codegen-data-use-path=default.cgdata" +// USE-FILE: "-mllvm" "-codegen-data-use-path=file" + +// Verify the codegen-data-generate (boolean) flag with LTO.
+// RUN: %clang -### -flto --target=aarch64-linux-gnu -fcodegen-data-generate %s 2>&1 | FileCheck %s --check-prefix=GENERATE-LTO +// GENERATE-LTO: {{ld(.exe)?"}} +// GENERATE-LTO-SAME: "-plugin-opt=-codegen-data-generate" +// RUN: %clang -### -flto --target=arm64-apple-darwin -fcodegen-data-generate %s 2>&1 | FileCheck %s --check-prefix=GENERATE-LTO-DARWIN +// GENERATE-LTO-DARWIN: {{ld(.exe)?"}} +// GENERATE-LTO-DARWIN-SAME: "-mllvm" "-codegen-data-generate" + +// Verify the codegen-data-use-path flag with LTO is passed to LLVM. +// RUN: %clang -### -flto=thin --target=aarch64-linux-gnu -fcodegen-data-use %s 2>&1 | FileCheck %s --check-prefix=USE-LTO +// USE-LTO: {{ld(.exe)?"}} +// USE-LTO-SAME: "-plugin-opt=-codegen-data-use-path=default.cgdata" +// RUN: %clang -### -flto=thin --target=arm64-apple-darwin -fcodegen-data-use %s 2>&1 | FileCheck %s --check-prefix=USE-LTO-DARWIN +// USE-LTO-DARWIN: {{ld(.exe)?"}} +// USE-LTO-DARWIN-SAME: "-mllvm" "-codegen-data-use-path=default.cgdata" + +// For now, LLD MachO supports generating the codegen data at link time. +// RUN: %clang -### -fuse-ld=lld -B%S/Inputs/lld --target=arm64-apple-darwin -fcodegen-data-generate %s 2>&1 | FileCheck %s --check-prefix=GENERATE-LLD-DARWIN +// GENERATE-LLD-DARWIN: {{ld(.exe)?"}} +// GENERATE-LLD-DARWIN-SAME: "--codegen-data-generate-path=default.cgdata" diff --git a/clang/test/Driver/riscv-cpus.c b/clang/test/Driver/riscv-cpus.c index 481eaae..d36639d 100644 --- a/clang/test/Driver/riscv-cpus.c +++ b/clang/test/Driver/riscv-cpus.c @@ -502,3 +502,29 @@ // RUN: %clang --target=riscv64 -### -c %s 2>&1 -mtune=syntacore-scr5-rv64 | FileCheck -check-prefix=MTUNE-SYNTACORE-SCR5-RV64 %s // MTUNE-SYNTACORE-SCR5-RV64: "-tune-cpu" "syntacore-scr5-rv64" + +// RUN: %clang --target=riscv64 -### -c %s 2>&1 -mcpu=syntacore-scr7 | FileCheck -check-prefix=MCPU-SYNTACORE-SCR7 %s +// MCPU-SYNTACORE-SCR7: "-target-cpu" "syntacore-scr7" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+m" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+a" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+f" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+d" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+c" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+v" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+zicsr" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+zifencei" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+zba" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+zbb" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+zbc" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+zbkb" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+zbkc" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+zbkx" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+zbs" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+zkn" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+zknd" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+zkne" +// MCPU-SYNTACORE-SCR7-SAME: "-target-feature" "+zknh" +// MCPU-SYNTACORE-SCR7-SAME: "-target-abi" "lp64d" + +// RUN: %clang --target=riscv64 -### -c %s 2>&1 -mtune=syntacore-scr7 | FileCheck -check-prefix=MTUNE-SYNTACORE-SCR7 %s +// MTUNE-SYNTACORE-SCR7: "-tune-cpu" "syntacore-scr7" diff --git a/clang/test/Misc/target-invalid-cpu-note/riscv.c b/clang/test/Misc/target-invalid-cpu-note/riscv.c index 96d3cef..7bbf357 100644 --- a/clang/test/Misc/target-invalid-cpu-note/riscv.c +++ b/clang/test/Misc/target-invalid-cpu-note/riscv.c @@ -40,6 +40,7 @@ // RISCV64-SAME: {{^}}, syntacore-scr3-rv64 // RISCV64-SAME: {{^}},
syntacore-scr4-rv64 // RISCV64-SAME: {{^}}, syntacore-scr5-rv64 +// RISCV64-SAME: {{^}}, syntacore-scr7 // RISCV64-SAME: {{^}}, veyron-v1 // RISCV64-SAME: {{^}}, xiangshan-nanhu // RISCV64-SAME: {{$}} @@ -85,6 +86,7 @@ // TUNE-RISCV64-SAME: {{^}}, syntacore-scr3-rv64 // TUNE-RISCV64-SAME: {{^}}, syntacore-scr4-rv64 // TUNE-RISCV64-SAME: {{^}}, syntacore-scr5-rv64 +// TUNE-RISCV64-SAME: {{^}}, syntacore-scr7 // TUNE-RISCV64-SAME: {{^}}, veyron-v1 // TUNE-RISCV64-SAME: {{^}}, xiangshan-nanhu // TUNE-RISCV64-SAME: {{^}}, generic diff --git a/clang/test/Modules/cxx20-10-3-ex1.cpp b/clang/test/Modules/cxx20-10-3-ex1.cpp index 99b88c7..82ecb40 100644 --- a/clang/test/Modules/cxx20-10-3-ex1.cpp +++ b/clang/test/Modules/cxx20-10-3-ex1.cpp @@ -37,6 +37,7 @@ module M:PartImpl; export module M; // error: exported partition :Part is an implementation unit export import :PartImpl; // expected-error {{module partition implementations cannot be exported}} + // expected-warning@-1 {{importing an implementation partition unit in a module interface is not recommended.}} //--- std10-3-ex1-tu3.cpp export module M:Part; diff --git a/clang/test/Parser/static_assert.cpp b/clang/test/Parser/static_assert.cpp new file mode 100644 index 0000000..4fe7d3c --- /dev/null +++ b/clang/test/Parser/static_assert.cpp @@ -0,0 +1,6 @@ +// RUN: %clang_cc1 -fsyntax-only -triple=x86_64-linux -std=c++2a -verify=cxx2a %s +// RUN: %clang_cc1 -fsyntax-only -triple=x86_64-linux -std=c++2c -verify=cxx2c %s + +static_assert(true, "" // cxx2a-warning {{'static_assert' with a user-generated message is a C++26 extension}} \ + // cxx2a-note {{to match this '('}} cxx2c-note {{to match this '('}} + // cxx2a-error {{expected ')'}} cxx2c-error {{expected ')'}} diff --git a/clang/test/Sema/diagnose_if.c b/clang/test/Sema/diagnose_if.c index 4df3991..e9b8497 100644 --- a/clang/test/Sema/diagnose_if.c +++ b/clang/test/Sema/diagnose_if.c @@ -2,10 +2,10 @@ #define _diagnose_if(...) 
__attribute__((diagnose_if(__VA_ARGS__))) -void failure1(void) _diagnose_if(); // expected-error{{exactly 3 arguments}} -void failure2(void) _diagnose_if(0); // expected-error{{exactly 3 arguments}} -void failure3(void) _diagnose_if(0, ""); // expected-error{{exactly 3 arguments}} -void failure4(void) _diagnose_if(0, "", "error", 1); // expected-error{{exactly 3 arguments}} +void failure1(void) _diagnose_if(); // expected-error{{at least 3 arguments}} +void failure2(void) _diagnose_if(0); // expected-error{{at least 3 arguments}} +void failure3(void) _diagnose_if(0, ""); // expected-error{{at least 3 arguments}} +void failure4(void) _diagnose_if(0, "", "error", 1); // expected-error{{expected string literal as argument}} void failure5(void) _diagnose_if(0, 0, "error"); // expected-error{{expected string literal as argument of 'diagnose_if' attribute}} void failure6(void) _diagnose_if(0, "", "invalid"); // expected-error{{invalid diagnostic type for 'diagnose_if'; use "error" or "warning" instead}} void failure7(void) _diagnose_if(0, "", "ERROR"); // expected-error{{invalid diagnostic type}} diff --git a/clang/test/Sema/warn-lifetime-analysis-nocfg.cpp b/clang/test/Sema/warn-lifetime-analysis-nocfg.cpp index 59357d0..69e5395 100644 --- a/clang/test/Sema/warn-lifetime-analysis-nocfg.cpp +++ b/clang/test/Sema/warn-lifetime-analysis-nocfg.cpp @@ -553,3 +553,37 @@ void test() { std::string_view svjkk1 = ReturnStringView(StrCat("bar", "x")); // expected-warning {{object backing the pointer will be destroyed at the end of the full-expression}} } } // namespace GH100549 + +namespace GH108272 { +template <typename T> +struct [[gsl::Owner]] StatusOr { + const T &value() [[clang::lifetimebound]]; +}; + +template <typename V> +class Wrapper1 { + public: + operator V() const; + V value; +}; +std::string_view test1() { + StatusOr<Wrapper1<std::string_view>> k; + // Be conservative in this case, as there is not enough information available + // to infer the lifetime relationship for the Wrapper1 type. + std::string_view good = StatusOr<Wrapper1<std::string_view>>().value(); + return k.value(); +} + +template <typename V> +class Wrapper2 { + public: + operator V() const [[clang::lifetimebound]]; + V value; +}; +std::string_view test2() { + StatusOr<Wrapper2<std::string_view>> k; + // We expect dangling issues as the conversion operator is lifetimebound. + std::string_view bad = StatusOr<Wrapper2<std::string_view>>().value(); // expected-warning {{temporary whose address is used as value of}} + return k.value(); // expected-warning {{address of stack memory associated}} +} +} // namespace GH108272 diff --git a/clang/test/SemaCXX/diagnose_if-warning-group.cpp b/clang/test/SemaCXX/diagnose_if-warning-group.cpp new file mode 100644 index 0000000..a39c0c0 --- /dev/null +++ b/clang/test/SemaCXX/diagnose_if-warning-group.cpp @@ -0,0 +1,63 @@ +// RUN: %clang_cc1 %s -verify=expected,wall -fno-builtin -Wno-pedantic -Werror=comment -Wno-error=abi -Wfatal-errors=assume -Wno-fatal-errors=assume -Wno-format +// RUN: %clang_cc1 %s -verify=expected,wno-all,pedantic,format -fno-builtin -Wno-all -Werror=comment -Wno-error=abi -Werror=assume -Wformat + +#define diagnose_if(...)
__attribute__((diagnose_if(__VA_ARGS__))) + +#ifndef EMPTY_WARNING_GROUP +void bogus_warning() diagnose_if(true, "oh no", "warning", "bogus warning") {} // expected-error {{unknown warning group 'bogus warning'}} + +void show_in_system_header() diagnose_if(true, "oh no", "warning", "assume", "Banane") {} // expected-error {{'diagnose_if' attribute takes no more than 4 arguments}} +#endif // EMPTY_WARNING_GROUP + +template <bool b> +void diagnose_if_wcomma() diagnose_if(b, "oh no", "warning", "comma") {} + +template <bool b> +void diagnose_if_wcomment() diagnose_if(b, "oh no", "warning", "comment") {} + +void empty_warning_group() diagnose_if(true, "oh no", "warning", "") {} // expected-error {{unknown warning group ''}} +void empty_warning_group_error() diagnose_if(true, "oh no", "error", "") {} // expected-error {{unknown warning group ''}} + +void diagnose_if_wabi_default_error() diagnose_if(true, "ABI stuff", "error", "abi") {} +void diagnose_assume() diagnose_if(true, "Assume diagnostic", "warning", "assume") {} + +void Wall() diagnose_if(true, "oh no", "warning", "all") {} +void Wpedantic() diagnose_if(true, "oh no", "warning", "pedantic") {} +void Wformat_extra_args() diagnose_if(true, "oh no", "warning", "format-extra-args") {} + +void call() { + diagnose_if_wcomma<true>(); // expected-warning {{oh no}} + diagnose_if_wcomma<false>(); + diagnose_if_wcomment<true>(); // expected-error {{oh no}} + diagnose_if_wcomment<false>(); + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wcomma" + diagnose_if_wcomma<true>(); + diagnose_if_wcomment<true>(); // expected-error {{oh no}} +#pragma clang diagnostic pop + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wcomment" + diagnose_if_wcomma<true>(); // expected-warning {{oh no}} + diagnose_if_wcomment<true>(); +#pragma clang diagnostic pop + + diagnose_if_wcomma<true>(); // expected-warning {{oh no}} + diagnose_if_wcomment<true>(); // expected-error {{oh no}} + + diagnose_if_wabi_default_error(); // expected-warning {{ABI stuff}} + diagnose_assume(); // expected-error {{Assume diagnostic}} + + // Make sure that the -Wassume diagnostic isn't fatal + diagnose_if_wabi_default_error(); // expected-warning {{ABI stuff}} + + Wall(); // wall-warning {{oh no}} + Wpedantic(); // pedantic-warning {{oh no}} + Wformat_extra_args(); // format-warning {{oh no}} + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wformat" + Wformat_extra_args(); +#pragma clang diagnostic pop +} diff --git a/clang/test/SemaOpenCL/unsupported.cl b/clang/test/SemaOpenCL/unsupported.cl index 75175c8..7195ceb 100644 --- a/clang/test/SemaOpenCL/unsupported.cl +++ b/clang/test/SemaOpenCL/unsupported.cl @@ -17,5 +17,7 @@ void no_vla(int n) { } void no_logxor(int n) { - int logxor = n ^^ n; // expected-error {{^^ is a reserved operator in OpenCL}} + int logxor = n ^^ n; // expected-error {{^^ is a reserved operator in OpenCL}} \ + expected-error {{type name requires a specifier or qualifier}} \ + expected-error {{expected expression}} } diff --git a/clang/test/SemaTemplate/cwg2398.cpp b/clang/test/SemaTemplate/cwg2398.cpp index 1d97472..6dc7af6 100644 --- a/clang/test/SemaTemplate/cwg2398.cpp +++ b/clang/test/SemaTemplate/cwg2398.cpp @@ -379,3 +379,13 @@ namespace regression1 { bar(input); } } // namespace regression1 + +namespace regression2 { + template <class> struct D {}; + + template <class ET, template <class> class VT> + struct D<VT<ET>>; + + template <typename, int> struct Matrix; + template struct D<Matrix<double, 3>>;
diff --git a/clang/tools/clang-refactor/ClangRefactor.cpp b/clang/tools/clang-refactor/ClangRefactor.cpp
index 175a2b8..9310263 100644
--- a/clang/tools/clang-refactor/ClangRefactor.cpp
+++ b/clang/tools/clang-refactor/ClangRefactor.cpp
@@ -560,7 +560,6 @@ private:
        << "' can't be invoked with the given arguments:\n";
     for (const auto &Opt : MissingOptions)
       OS << "  missing '-" << Opt.getKey() << "' option\n";
-    OS.flush();
     return llvm::make_error<llvm::StringError>(
         Error, llvm::inconvertibleErrorCode());
   }
@@ -591,7 +590,6 @@ private:
     OS << "note: the following actions are supported:\n";
     for (const auto &Subcommand : SubCommands)
       OS.indent(2) << Subcommand->getName() << "\n";
-    OS.flush();
     return llvm::make_error<llvm::StringError>(
         Error, llvm::inconvertibleErrorCode());
   }
diff --git a/clang/tools/diagtool/ListWarnings.cpp b/clang/tools/diagtool/ListWarnings.cpp
index a71f6e3..9f96471 100644
--- a/clang/tools/diagtool/ListWarnings.cpp
+++ b/clang/tools/diagtool/ListWarnings.cpp
@@ -53,13 +53,13 @@ int ListWarnings::run(unsigned int argc, char **argv, llvm::raw_ostream &out) {
   for (const DiagnosticRecord &DR : getBuiltinDiagnosticsByName()) {
     const unsigned diagID = DR.DiagID;
 
-    if (DiagnosticIDs::isBuiltinNote(diagID))
+    if (DiagnosticIDs{}.isNote(diagID))
       continue;
 
-    if (!DiagnosticIDs::isBuiltinWarningOrExtension(diagID))
+    if (!DiagnosticIDs{}.isWarningOrExtension(diagID))
       continue;
 
-    Entry entry(DR.getName(), DiagnosticIDs::getWarningOptionForDiag(diagID));
+    Entry entry(DR.getName(), DiagnosticIDs{}.getWarningOptionForDiag(diagID));
 
     if (entry.Flag.empty())
       Unflagged.push_back(entry);
@@ -97,4 +97,3 @@ int ListWarnings::run(unsigned int argc, char **argv, llvm::raw_ostream &out) {
 
   return 0;
 }
-
diff --git a/clang/tools/diagtool/ShowEnabledWarnings.cpp b/clang/tools/diagtool/ShowEnabledWarnings.cpp
index 66a295d..caf6722 100644
--- a/clang/tools/diagtool/ShowEnabledWarnings.cpp
+++ b/clang/tools/diagtool/ShowEnabledWarnings.cpp
@@ -117,10 +117,10 @@ int ShowEnabledWarnings::run(unsigned int argc, char **argv, raw_ostream &Out) {
   for (const DiagnosticRecord &DR : getBuiltinDiagnosticsByName()) {
     unsigned DiagID = DR.DiagID;
 
-    if (DiagnosticIDs::isBuiltinNote(DiagID))
+    if (DiagnosticIDs{}.isNote(DiagID))
      continue;
 
-    if (!DiagnosticIDs::isBuiltinWarningOrExtension(DiagID))
+    if (!DiagnosticIDs{}.isWarningOrExtension(DiagID))
       continue;
 
     DiagnosticsEngine::Level DiagLevel =
@@ -128,7 +128,7 @@ int ShowEnabledWarnings::run(unsigned int argc, char **argv, raw_ostream &Out) {
     if (DiagLevel == DiagnosticsEngine::Ignored)
       continue;
 
-    StringRef WarningOpt = DiagnosticIDs::getWarningOptionForDiag(DiagID);
+    StringRef WarningOpt = DiagnosticIDs{}.getWarningOptionForDiag(DiagID);
     Active.push_back(PrettyDiag(DR.getName(), WarningOpt, DiagLevel));
   }
 
diff --git a/clang/tools/driver/cc1gen_reproducer_main.cpp b/clang/tools/driver/cc1gen_reproducer_main.cpp
index e97fa3d..be081ca 100644
--- a/clang/tools/driver/cc1gen_reproducer_main.cpp
+++ b/clang/tools/driver/cc1gen_reproducer_main.cpp
@@ -105,8 +105,8 @@ static std::string generateReproducerMetaInfo(const ClangInvocationInfo &Info) {
   OS << '}';
   // FIXME: Compare unsaved file hashes and report mismatch in the reproducer.
   if (Info.Dump)
-    llvm::outs() << "REPRODUCER METAINFO: " << OS.str() << "\n";
-  return std::move(OS.str());
+    llvm::outs() << "REPRODUCER METAINFO: " << Result << "\n";
+  return Result;
 }
 
 /// Generates a reproducer for a set of arguments from a specific invocation.
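A recurring cleanup throughout this commit replaces OS.str() and OS.flush() with direct use of the backing string: raw_string_ostream is unbuffered and writes straight into the std::string it wraps, so the round-trip is redundant. A minimal sketch of the resulting shape (the function name is illustrative):

#include "llvm/Support/raw_ostream.h"
#include <string>

std::string renderLabel(int N) {
  std::string Result;
  llvm::raw_string_ostream OS(Result); // writes land directly in Result
  OS << "id" << N;
  return Result;                       // no OS.str() or OS.flush() needed
}

The same reasoning explains the unittest hunks below, which assert against the buffer variable rather than the stream.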
diff --git a/clang/tools/libclang/CXStoredDiagnostic.cpp b/clang/tools/libclang/CXStoredDiagnostic.cpp
index 0301822..6fb3050 100644
--- a/clang/tools/libclang/CXStoredDiagnostic.cpp
+++ b/clang/tools/libclang/CXStoredDiagnostic.cpp
@@ -51,7 +51,9 @@ CXString CXStoredDiagnostic::getSpelling() const {
 
 CXString CXStoredDiagnostic::getDiagnosticOption(CXString *Disable) const {
   unsigned ID = Diag.getID();
-  StringRef Option = DiagnosticIDs::getWarningOptionForDiag(ID);
+  if (DiagnosticIDs::IsCustomDiag(ID))
+    return cxstring::createEmpty();
+  StringRef Option = DiagnosticIDs{}.getWarningOptionForDiag(ID);
   if (!Option.empty()) {
     if (Disable)
       *Disable = cxstring::createDup((Twine("-Wno-") + Option).str());
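The diagtool and libclang hunks share one API shift: the DiagnosticIDs lookup helpers become instance methods (constructed here as DiagnosticIDs{}) so they can account for custom diagnostics, while IsCustomDiag stays static. A sketch of the call-site pattern, reusing only the calls visible in the hunks above (the helper name is illustrative):

#include "clang/Basic/DiagnosticIDs.h"

// Returns the -W flag controlling a diagnostic, or "" when there is none.
llvm::StringRef warningFlagFor(unsigned ID) {
  if (clang::DiagnosticIDs::IsCustomDiag(ID)) // custom diags have no table entry
    return {};
  return clang::DiagnosticIDs{}.getWarningOptionForDiag(ID);
}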
diff --git a/clang/unittests/AST/SourceLocationTest.cpp b/clang/unittests/AST/SourceLocationTest.cpp
index 66daa56..daea2d6 100644
--- a/clang/unittests/AST/SourceLocationTest.cpp
+++ b/clang/unittests/AST/SourceLocationTest.cpp
@@ -105,7 +105,7 @@ protected:
       RParenLoc.print(Msg, *Result.SourceManager);
       Msg << ">";
 
-      this->setFailure(Msg.str());
+      this->setFailure(MsgStr);
     }
   }
 };
diff --git a/clang/unittests/AST/TemplateNameTest.cpp b/clang/unittests/AST/TemplateNameTest.cpp
index 444ccfb..2eac5c5 100644
--- a/clang/unittests/AST/TemplateNameTest.cpp
+++ b/clang/unittests/AST/TemplateNameTest.cpp
@@ -21,7 +21,7 @@ std::string printTemplateName(TemplateName TN, const PrintingPolicy &Policy,
   std::string Result;
   llvm::raw_string_ostream Out(Result);
   TN.print(Out, Policy, Qual);
-  return Out.str();
+  return Result;
 }
 
 TEST(TemplateName, PrintTemplate) {
diff --git a/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp b/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp
index 9c4ec07..137baab 100644
--- a/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp
+++ b/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp
@@ -78,7 +78,7 @@ mutatedBy(const SmallVectorImpl<BoundNodes> &Results, ASTUnit *AST) {
     std::string Buffer;
     llvm::raw_string_ostream Stream(Buffer);
     By->printPretty(Stream, nullptr, AST->getASTContext().getPrintingPolicy());
-    Chain.emplace_back(StringRef(Stream.str()).trim().str());
+    Chain.emplace_back(StringRef(Buffer).trim().str());
     E = dyn_cast<DeclRefExpr>(By);
   }
   return Chain;
diff --git a/clang/unittests/Frontend/OutputStreamTest.cpp b/clang/unittests/Frontend/OutputStreamTest.cpp
index 7d360f6..2618558 100644
--- a/clang/unittests/Frontend/OutputStreamTest.cpp
+++ b/clang/unittests/Frontend/OutputStreamTest.cpp
@@ -67,7 +67,7 @@ TEST(FrontendOutputTests, TestVerboseOutputStreamShared) {
   bool Success = ExecuteCompilerInvocation(&Compiler);
 
   EXPECT_FALSE(Success);
-  EXPECT_TRUE(!VerboseStream.str().empty());
+  EXPECT_TRUE(!VerboseBuffer.empty());
   EXPECT_TRUE(StringRef(VerboseBuffer.data()).contains("errors generated"));
 }
 
diff --git a/clang/unittests/StaticAnalyzer/RangeSetTest.cpp b/clang/unittests/StaticAnalyzer/RangeSetTest.cpp
index 318877c..9e36aab 100644
--- a/clang/unittests/StaticAnalyzer/RangeSetTest.cpp
+++ b/clang/unittests/StaticAnalyzer/RangeSetTest.cpp
@@ -25,7 +25,7 @@ template <class RangeOrSet> static std::string toString(const RangeOrSet &Obj) {
   std::string ObjRepresentation;
   llvm::raw_string_ostream SS(ObjRepresentation);
   Obj.dump(SS);
-  return SS.str();
+  return ObjRepresentation;
 }
 LLVM_ATTRIBUTE_UNUSED static std::string toString(const llvm::APSInt &Point) {
   return toString(Point, 10);
diff --git a/clang/unittests/Tooling/ASTSelectionTest.cpp b/clang/unittests/Tooling/ASTSelectionTest.cpp
index 38b7df8..113165f 100644
--- a/clang/unittests/Tooling/ASTSelectionTest.cpp
+++ b/clang/unittests/Tooling/ASTSelectionTest.cpp
@@ -171,7 +171,7 @@ TEST(ASTSelectionFinder, CursorAtStartOfFunction) {
         std::string DumpValue;
         llvm::raw_string_ostream OS(DumpValue);
         Node->Children[0].dump(OS);
-        ASSERT_EQ(OS.str(), "FunctionDecl \"f\" contains-selection\n");
+        ASSERT_EQ(DumpValue, "FunctionDecl \"f\" contains-selection\n");
      });
 }
 
diff --git a/clang/unittests/Tooling/DiagnosticsYamlTest.cpp b/clang/unittests/Tooling/DiagnosticsYamlTest.cpp
index 6d3b4b9..52d8194 100644
--- a/clang/unittests/Tooling/DiagnosticsYamlTest.cpp
+++ b/clang/unittests/Tooling/DiagnosticsYamlTest.cpp
@@ -151,7 +151,7 @@ TEST(DiagnosticsYamlTest, serializesDiagnostics) {
   yaml::Output YAML(YamlContentStream);
   YAML << TUD;
 
-  EXPECT_EQ(YAMLContent, YamlContentStream.str());
+  EXPECT_EQ(YAMLContent, YamlContent);
 }
 
 TEST(DiagnosticsYamlTest, deserializesDiagnostics) {
diff --git a/clang/unittests/Tooling/RecursiveASTVisitorTestDeclVisitor.cpp b/clang/unittests/Tooling/RecursiveASTVisitorTestDeclVisitor.cpp
index e207f03..d72a110 100644
--- a/clang/unittests/Tooling/RecursiveASTVisitorTestDeclVisitor.cpp
+++ b/clang/unittests/Tooling/RecursiveASTVisitorTestDeclVisitor.cpp
@@ -67,7 +67,7 @@ public:
     Decl->getNameForDiagnostic(OS,
                                Decl->getASTContext().getPrintingPolicy(),
                                true);
-    Match(OS.str(), Decl->getLocation());
+    Match(NameWithTemplateArgs, Decl->getLocation());
     return true;
   }
 };
diff --git a/clang/unittests/Tooling/RecursiveASTVisitorTests/TemplateArgumentLocTraverser.cpp b/clang/unittests/Tooling/RecursiveASTVisitorTests/TemplateArgumentLocTraverser.cpp
index f068e53..b87e89f 100644
--- a/clang/unittests/Tooling/RecursiveASTVisitorTests/TemplateArgumentLocTraverser.cpp
+++ b/clang/unittests/Tooling/RecursiveASTVisitorTests/TemplateArgumentLocTraverser.cpp
@@ -21,7 +21,7 @@ public:
     const TemplateArgument &Arg = ArgLoc.getArgument();
     Arg.print(Context->getPrintingPolicy(), Stream,
               /*IncludeType*/ true);
-    Match(Stream.str(), ArgLoc.getLocation());
+    Match(ArgStr, ArgLoc.getLocation());
     return ExpectedLocationVisitor<TemplateArgumentLocTraverser>::
         TraverseTemplateArgumentLoc(ArgLoc);
   }
diff --git a/clang/unittests/Tooling/RefactoringTest.cpp b/clang/unittests/Tooling/RefactoringTest.cpp
index 77d410f..4f0cccdc 100644
--- a/clang/unittests/Tooling/RefactoringTest.cpp
+++ b/clang/unittests/Tooling/RefactoringTest.cpp
@@ -135,7 +135,6 @@ static bool checkReplacementError(llvm::Error &&Error,
              << "\n";
     }
   });
-  OS.flush();
   if (ErrorMessage.empty()) return true;
   llvm::errs() << ErrorMessage;
   return false;
diff --git a/clang/unittests/Tooling/RewriterTestContext.h b/clang/unittests/Tooling/RewriterTestContext.h
index a618ebd..b7aa1a1 100644
--- a/clang/unittests/Tooling/RewriterTestContext.h
+++ b/clang/unittests/Tooling/RewriterTestContext.h
@@ -109,7 +109,6 @@ class RewriterTestContext {
     std::string Result;
     llvm::raw_string_ostream OS(Result);
     Rewrite.getEditBuffer(ID).write(OS);
-    OS.flush();
     return Result;
   }
 
diff --git a/clang/utils/TableGen/ASTTableGen.cpp b/clang/utils/TableGen/ASTTableGen.cpp
index 4734477..46bb6c2 100644
--- a/clang/utils/TableGen/ASTTableGen.cpp
+++ b/clang/utils/TableGen/ASTTableGen.cpp
@@ -21,7 +21,7 @@ using namespace llvm;
 using namespace clang;
 using namespace clang::tblgen;
 
-llvm::StringRef clang::tblgen::HasProperties::getName() const {
+StringRef clang::tblgen::HasProperties::getName() const {
   if (auto node = getAs<ASTNode>()) {
     return node.getName();
   } else if (auto typeCase = getAs<TypeCase>()) {
diff --git a/clang/utils/TableGen/ClangASTNodesEmitter.cpp b/clang/utils/TableGen/ClangASTNodesEmitter.cpp
index 512af83..9421f48 100644
--- a/clang/utils/TableGen/ClangASTNodesEmitter.cpp
+++ b/clang/utils/TableGen/ClangASTNodesEmitter.cpp
@@ -16,7 +16,6 @@
 #include "llvm/TableGen/Error.h"
 #include "llvm/TableGen/Record.h"
 #include "llvm/TableGen/TableGenBackend.h"
-#include <cctype>
 #include <map>
 #include <set>
 #include <string>
@@ -42,17 +41,12 @@ class ClangASTNodesEmitter {
   ChildMap Tree;
 
   // Create a macro-ized version of a name
-  static std::string macroName(std::string S) {
-    for (unsigned i = 0; i < S.size(); ++i)
-      S[i] = std::toupper(S[i]);
-
-    return S;
-  }
+  static std::string macroName(StringRef S) { return S.upper(); }
 
   const std::string &macroHierarchyName() {
     assert(Root && "root node not yet derived!");
     if (MacroHierarchyName.empty())
-      MacroHierarchyName = macroName(std::string(Root.getName()));
+      MacroHierarchyName = macroName(Root.getName());
 
     return MacroHierarchyName;
   }
@@ -93,34 +87,30 @@ public:
 // Called recursively to ensure that nodes remain contiguous
 std::pair<ASTNode, ASTNode> ClangASTNodesEmitter::EmitNode(raw_ostream &OS,
                                                            ASTNode Base) {
-  std::string BaseName = macroName(std::string(Base.getName()));
+  std::string BaseName = macroName(Base.getName());
 
-  ChildIterator i = Tree.lower_bound(Base), e = Tree.upper_bound(Base);
-  bool HasChildren = (i != e);
+  auto [II, E] = Tree.equal_range(Base);
+  bool HasChildren = II != E;
 
   ASTNode First, Last;
   if (!Base.isAbstract())
     First = Last = Base;
 
-  auto comp = [this](ASTNode LHS, ASTNode RHS) {
-    auto LHSPrioritized = PrioritizedClasses.count(LHS) > 0;
-    auto RHSPrioritized = PrioritizedClasses.count(RHS) > 0;
-    if (LHSPrioritized && !RHSPrioritized)
-      return true;
-    if (!LHSPrioritized && RHSPrioritized)
-      return false;
-
-    return LHS.getName() > RHS.getName();
+  auto Comp = [this](const ASTNode &LHS, const ASTNode &RHS) {
+    bool LHSPrioritized = PrioritizedClasses.count(LHS) > 0;
+    bool RHSPrioritized = PrioritizedClasses.count(RHS) > 0;
+    return std::tuple(LHSPrioritized, LHS.getName()) >
+           std::tuple(RHSPrioritized, RHS.getName());
   };
-  auto SortedChildren = std::set<ASTNode, decltype(comp)>(comp);
+  auto SortedChildren = std::set<ASTNode, decltype(Comp)>(Comp);
 
-  for (; i != e; ++i) {
-    SortedChildren.insert(i->second);
+  for (; II != E; ++II) {
+    SortedChildren.insert(II->second);
   }
 
   for (const auto &Child : SortedChildren) {
     bool Abstract = Child.isAbstract();
-    std::string NodeName = macroName(std::string(Child.getName()));
+    std::string NodeName = macroName(Child.getName());
 
     OS << "#ifndef " << NodeName << "\n";
     OS << "#  define " << NodeName << "(Type, Base) "
@@ -144,9 +134,8 @@ std::pair<ASTNode, ASTNode> ClangASTNodesEmitter::EmitNode(raw_ostream &OS,
   // If there aren't first/last nodes, it must be because there were no
   // children and this node was abstract, which is not a sensible combination.
-  if (!First) {
+  if (!First)
     PrintFatalError(Base.getLoc(), "abstract node has no children");
-  }
   assert(Last && "set First without Last");
 
   if (HasChildren) {
@@ -169,7 +158,7 @@ void ClangASTNodesEmitter::deriveChildTree() {
   // Emit statements
   for (const Record *R : Records.getAllDerivedDefinitions(NodeClassName)) {
     if (auto B = R->getValueAsOptionalDef(BaseFieldName))
-      Tree.insert(std::make_pair(B, R));
+      Tree.insert({B, R});
     else if (Root)
       PrintFatalError(R->getLoc(),
                      Twine("multiple root nodes in \"") + NodeClassName
@@ -222,10 +211,9 @@ void printDeclContext(const std::multimap<const Record *, const Record *> &Tree,
                       const Record *DeclContext, raw_ostream &OS) {
   if (!DeclContext->getValueAsBit(AbstractFieldName))
     OS << "DECL_CONTEXT(" << DeclContext->getName() << ")\n";
-  auto i = Tree.lower_bound(DeclContext);
-  auto end = Tree.upper_bound(DeclContext);
-  for (; i != end; ++i) {
-    printDeclContext(Tree, i->second, OS);
+  auto [II, E] = Tree.equal_range(DeclContext);
+  for (; II != E; ++II) {
+    printDeclContext(Tree, II->second, OS);
   }
 }
 
@@ -244,7 +232,7 @@ void clang::EmitClangDeclContext(const RecordKeeper &Records, raw_ostream &OS) {
   for (const Record *R : Records.getAllDerivedDefinitions(DeclNodeClassName)) {
     if (auto *B = R->getValueAsOptionalDef(BaseFieldName))
-      Tree.insert(std::make_pair(B, R));
+      Tree.insert({B, R});
   }
 
   for (const Record *DeclContext :
diff --git a/clang/utils/TableGen/ClangASTPropertiesEmitter.cpp b/clang/utils/TableGen/ClangASTPropertiesEmitter.cpp
index 2d67b6b..3151ff19 100644
--- a/clang/utils/TableGen/ClangASTPropertiesEmitter.cpp
+++ b/clang/utils/TableGen/ClangASTPropertiesEmitter.cpp
@@ -205,8 +205,7 @@ public:
   void visitAllNodesWithInfo(
       HasProperties derivedNode, const NodeInfo &derivedNodeInfo,
-      llvm::function_ref<void(HasProperties node, const NodeInfo &info)>
-          visit) {
+      function_ref<void(HasProperties node, const NodeInfo &info)> visit) {
     visit(derivedNode, derivedNodeInfo);
 
     // Also walk the bases if appropriate.
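Two idioms from the ClangASTNodesEmitter hunks above are worth isolating: multimap::equal_range() replaces a lower_bound()/upper_bound() pair and unpacks into structured bindings, and std::tuple's lexicographic comparison replaces a hand-written branch chain. A standalone sketch under those assumptions (names are illustrative):

#include <map>
#include <string>
#include <tuple>

// equal_range() yields both ends of the key's run in one call.
int countChildren(const std::multimap<std::string, int> &Tree,
                  const std::string &Base) {
  auto [II, E] = Tree.equal_range(Base);
  int N = 0;
  for (; II != E; ++II)
    ++N;
  return N;
}

// "Prioritized entries first, then by name descending" as one tuple compare.
bool before(bool LPrio, const std::string &L, bool RPrio, const std::string &R) {
  return std::tuple(LPrio, L) > std::tuple(RPrio, R);
}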
diff --git a/clang/utils/TableGen/ClangAttrEmitter.cpp b/clang/utils/TableGen/ClangAttrEmitter.cpp
index 9b2249a..87be48c 100644
--- a/clang/utils/TableGen/ClangAttrEmitter.cpp
+++ b/clang/utils/TableGen/ClangAttrEmitter.cpp
@@ -203,12 +203,11 @@ static ParsedAttrMap getParsedAttrList(const RecordKeeper &Records,
       // If this attribute has already been handled, it does not need to be
       // handled again.
-      if (Seen.find(AN) != Seen.end()) {
+      if (!Seen.insert(AN).second) {
         if (Dupes)
           Dupes->push_back(std::make_pair(AN, Attr));
         continue;
       }
-      Seen.insert(AN);
     } else
       AN = NormalizeAttrName(Attr->getName()).str();
 
@@ -1475,7 +1474,7 @@ createArgument(const Record &Arg, StringRef Attr,
     Search = &Arg;
 
   std::unique_ptr<Argument> Ptr;
-  llvm::StringRef ArgName = Search->getName();
+  StringRef ArgName = Search->getName();
 
   if (ArgName == "AlignedArgument")
     Ptr = std::make_unique<AlignedArgument>(Arg, Attr);
@@ -1532,7 +1531,7 @@ createArgument(const Record &Arg, StringRef Attr,
   if (!Ptr) {
     // Search in reverse order so that the most-derived type is handled first.
     ArrayRef<std::pair<Record*, SMRange>> Bases = Search->getSuperClasses();
-    for (const auto &Base : llvm::reverse(Bases)) {
+    for (const auto &Base : reverse(Bases)) {
       if ((Ptr = createArgument(Arg, Attr, Base.first)))
         break;
     }
@@ -1611,11 +1610,11 @@ writePrettyPrintFunction(const Record &R,
        << "    break;\n";
 
   for (unsigned I = 0; I < Spellings.size(); ++ I) {
-    llvm::SmallString<16> Prefix;
-    llvm::SmallString<8> Suffix;
+    SmallString<16> Prefix;
+    SmallString<8> Suffix;
     // The actual spelling of the name and namespace (if applicable)
     // of an attribute without considering prefix and suffix.
-    llvm::SmallString<64> Spelling;
+    SmallString<64> Spelling;
     std::string Name = Spellings[I].name();
     std::string Variety = Spellings[I].variety();
 
@@ -1680,7 +1679,7 @@ writePrettyPrintFunction(const Record &R,
     // printing spurious commas at the end of an argument list, we need to
     // determine where the last provided non-fake argument is.
     bool FoundNonOptArg = false;
-    for (const auto &arg : llvm::reverse(Args)) {
+    for (const auto &arg : reverse(Args)) {
       if (arg->isFake())
         continue;
       if (FoundNonOptArg)
@@ -1776,7 +1775,7 @@ SpellingNamesAreCommon(const std::vector<FlattenedSpelling>& Spellings) {
   assert(!Spellings.empty() && "An empty list of spellings was provided");
   std::string FirstName =
       std::string(NormalizeNameForSpellingComparison(Spellings.front().name()));
-  for (const auto &Spelling : llvm::drop_begin(Spellings)) {
+  for (const auto &Spelling : drop_begin(Spellings)) {
     std::string Name =
         std::string(NormalizeNameForSpellingComparison(Spelling.name()));
     if (Name != FirstName)
@@ -1824,17 +1823,16 @@ CreateSemanticSpellings(const std::vector<FlattenedSpelling> &Spellings,
     // reserved namespace, we may have inadvertently created duplicate
     // enumerant names. These duplicates are not considered part of the
     // semantic spelling, and can be elided.
-    if (Uniques.find(EnumName) != Uniques.end())
+    if (!Uniques.insert(EnumName).second)
       continue;
 
-    Uniques.insert(EnumName);
     if (I != Spellings.begin())
       Ret += ",\n";
     // Duplicate spellings are not considered part of the semantic spelling
     // enumeration, but the spelling index and semantic spelling values are
     // meant to be equivalent, so we must specify a concrete value for each
     // enumerator.
-    Ret += "  " + EnumName + " = " + llvm::utostr(Idx);
+    Ret += "  " + EnumName + " = " + utostr(Idx);
   }
   Ret += ",\n  SpellingNotCalculated = 15\n";
   Ret += "\n  };\n\n";
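The getParsedAttrList and CreateSemanticSpellings hunks above both switch from a find()-then-insert() pair to the single-call idiom: set/map insert() returns a pair whose .second says whether the element was new. A minimal sketch (names illustrative):

#include <set>
#include <string>

// One lookup instead of two; false means Name was already present.
bool firstSighting(std::set<std::string> &Seen, const std::string &Name) {
  return Seen.insert(Name).second;
}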
@@ -1871,15 +1869,14 @@ static LateAttrParseKind getLateAttrParseKind(const Record *Attr) {
   SmallVector<Record *, 1> SuperClasses;
   LAPK->getDirectSuperClasses(SuperClasses);
   if (SuperClasses.size() != 1)
-    PrintFatalError(Attr, "Field `" + llvm::Twine(LateParsedStr) +
+    PrintFatalError(Attr, "Field `" + Twine(LateParsedStr) +
                               "`should only have one super class");
 
   if (SuperClasses[0]->getName() != LateAttrParseKindStr)
-    PrintFatalError(Attr, "Field `" + llvm::Twine(LateParsedStr) +
-                              "`should only have type `" +
-                              llvm::Twine(LateAttrParseKindStr) +
-                              "` but found type `" +
-                              SuperClasses[0]->getName() + "`");
+    PrintFatalError(
+        Attr, "Field `" + Twine(LateParsedStr) + "`should only have type `" +
+                  Twine(LateAttrParseKindStr) + "` but found type `" +
+                  SuperClasses[0]->getName() + "`");
 
   // Get Kind and verify the enum name matches the name in `Attr.td`.
   unsigned Kind = LAPK->getValueAsInt(KindFieldStr);
@@ -1887,11 +1884,11 @@ static LateAttrParseKind getLateAttrParseKind(const Record *Attr) {
 #define CASE(X)                                                                \
   case LateAttrParseKind::X:                                                   \
     if (LAPK->getName().compare("LateAttrParse" #X) != 0) {                    \
-      PrintFatalError(Attr,                                                    \
-                      "Field `" + llvm::Twine(LateParsedStr) + "` set to `" +  \
-                          LAPK->getName() +                                    \
-                          "` but this converts to `LateAttrParseKind::" +      \
-                          llvm::Twine(#X) + "`");                              \
+      PrintFatalError(                                                         \
+          Attr,                                                                \
+          "Field `" + Twine(LateParsedStr) + "` set to `" + LAPK->getName() +  \
+              "` but this converts to `LateAttrParseKind::" + Twine(#X) +      \
+              "`");                                                            \
     }                                                                          \
     return LateAttrParseKind::X;
 
@@ -1902,11 +1899,10 @@ static LateAttrParseKind getLateAttrParseKind(const Record *Attr) {
   }
 
   // The Kind value is completely invalid
-  auto KindValueStr = llvm::utostr(Kind);
-  PrintFatalError(Attr, "Field `" + llvm::Twine(LateParsedStr) + "` set to `" +
+  auto KindValueStr = utostr(Kind);
+  PrintFatalError(Attr, "Field `" + Twine(LateParsedStr) + "` set to `" +
                             LAPK->getName() + "` has unexpected `" +
-                            llvm::Twine(KindFieldStr) + "` value of " +
-                            KindValueStr);
+                            Twine(KindFieldStr) + "` value of " + KindValueStr);
 }
 
 // Emits the LateParsed property for attributes.
@@ -2061,7 +2057,7 @@ struct PragmaClangAttributeSupport {
       return RuleOrAggregateRuleSet(Rules, /*IsRule=*/false);
     }
   };
-  llvm::DenseMap<const Record *, RuleOrAggregateRuleSet> SubjectsToRules;
+  DenseMap<const Record *, RuleOrAggregateRuleSet> SubjectsToRules;
 
   PragmaClangAttributeSupport(const RecordKeeper &Records);
 
@@ -2301,7 +2297,7 @@ void PragmaClangAttributeSupport::generateParsingHelpers(raw_ostream &OS) {
   OS << "  return std::nullopt;\n";
   OS << "}\n\n";
 
-  llvm::MapVector<const Record *, std::vector<AttributeSubjectMatchRule>>
+  MapVector<const Record *, std::vector<AttributeSubjectMatchRule>>
       SubMatchRules;
   for (const auto &Rule : Rules) {
     if (!Rule.isSubRule())
@@ -2404,7 +2400,7 @@ void generateNameToAttrsMap(const RecordKeeper &Records) {
     for (const auto &S : Spellings) {
       auto It = NameToAttrsMap.find(S.name());
       if (It != NameToAttrsMap.end()) {
-        if (llvm::none_of(It->second, [&](const Record *R) { return R == A; }))
+        if (none_of(It->second, [&](const Record *R) { return R == A; }))
          It->second.emplace_back(A);
       } else {
         std::vector<const Record *> V;
@@ -2542,17 +2538,16 @@ static void emitClangAttrArgContextList(const RecordKeeper &Records,
 
 static bool isIdentifierArgument(const Record *Arg) {
   return !Arg->getSuperClasses().empty() &&
-         llvm::StringSwitch<bool>(Arg->getSuperClasses().back().first->getName())
-             .Case("IdentifierArgument", true)
-             .Case("EnumArgument", true)
-             .Case("VariadicEnumArgument", true)
-             .Default(false);
+         StringSwitch<bool>(Arg->getSuperClasses().back().first->getName())
+             .Case("IdentifierArgument", true)
+             .Case("EnumArgument", true)
+             .Case("VariadicEnumArgument", true)
+             .Default(false);
 }
 
 static bool isVariadicIdentifierArgument(const Record *Arg) {
   return !Arg->getSuperClasses().empty() &&
-         llvm::StringSwitch<bool>(
-             Arg->getSuperClasses().back().first->getName())
+         StringSwitch<bool>(Arg->getSuperClasses().back().first->getName())
              .Case("VariadicIdentifierArgument", true)
              .Case("VariadicParamOrParamIdxArgument", true)
              .Default(false);
@@ -2560,8 +2555,7 @@ static bool isVariadicIdentifierArgument(const Record *Arg) {
 
 static bool isVariadicExprArgument(const Record *Arg) {
   return !Arg->getSuperClasses().empty() &&
-         llvm::StringSwitch<bool>(
-             Arg->getSuperClasses().back().first->getName())
+         StringSwitch<bool>(Arg->getSuperClasses().back().first->getName())
             .Case("VariadicExprArgument", true)
             .Default(false);
 }
@@ -2658,7 +2652,7 @@ static void emitClangAttrStrictIdentifierArgList(const RecordKeeper &Records,
       continue;
     // Check that there is really an identifier argument.
     std::vector<Record *> Args = Attr->getValueAsListOfDefs("Args");
-    if (llvm::none_of(Args, [&](Record *R) { return isIdentifierArgument(R); }))
+    if (none_of(Args, [&](Record *R) { return isIdentifierArgument(R); }))
       continue;
     generateFlattenedSpellingInfo(*Attr, FSIMap);
   }
@@ -2668,8 +2662,7 @@ static void emitClangAttrStrictIdentifierArgList(const RecordKeeper &Records,
 
 static bool keywordThisIsaIdentifierInArgument(const Record *Arg) {
   return !Arg->getSuperClasses().empty() &&
-         llvm::StringSwitch<bool>(
-             Arg->getSuperClasses().back().first->getName())
+         StringSwitch<bool>(Arg->getSuperClasses().back().first->getName())
             .Case("VariadicParamOrParamIdxArgument", true)
             .Default(false);
 }
@@ -2759,7 +2752,7 @@ static void emitAttributes(const RecordKeeper &Records, raw_ostream &OS,
     assert(!Supers.empty() && "Forgot to specify a superclass for the attr");
     std::string SuperName;
     bool Inheritable = false;
-    for (const auto &Super : llvm::reverse(Supers)) {
+    for (const auto &Super : reverse(Supers)) {
       const Record *R = Super.first;
       if (R->getName() != "TargetSpecificAttr" &&
           R->getName() != "DeclOrTypeAttr" && SuperName.empty())
@@ -2843,8 +2836,8 @@ static void emitAttributes(const RecordKeeper &Records, raw_ostream &OS,
     if (Header)
       OS << SpellingEnum;
 
-    const auto &ParsedAttrSpellingItr = llvm::find_if(
-        AttrMap, [R](const std::pair<std::string, const Record *> &P) {
+    const auto &ParsedAttrSpellingItr =
+        find_if(AttrMap, [R](const std::pair<std::string, const Record *> &P) {
          return &R == P.second;
        });
 
@@ -3133,7 +3126,7 @@ static void emitAttributes(const RecordKeeper &Records, raw_ostream &OS,
     // Emit constructors that take no arguments if none already exists.
     // This is used for delaying arguments.
     bool HasRequiredArgs =
-        llvm::count_if(Args, [=](const std::unique_ptr<Argument> &arg) {
+        count_if(Args, [=](const std::unique_ptr<Argument> &arg) {
          return !arg->isFake() && !arg->isOptional();
        });
     if (DelayedArgs && HasRequiredArgs)
@@ -3280,7 +3273,7 @@ static void emitAttrList(raw_ostream &OS, StringRef Class,
 // Determines if an attribute has a Pragma spelling.
 static bool AttrHasPragmaSpelling(const Record *R) {
   std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(*R);
-  return llvm::any_of(Spellings, [](const FlattenedSpelling &S) {
+  return any_of(Spellings, [](const FlattenedSpelling &S) {
     return S.variety() == "Pragma";
   });
 }
@@ -3646,7 +3639,7 @@ static bool GenerateTargetSpecificAttrChecks(const Record *R,
                                              std::string *FnName) {
   bool AnyTargetChecks = false;
 
-  // It is assumed that there will be an llvm::Triple object
+  // It is assumed that there will be a Triple object
   // named "T" and a TargetInfo object named "Target" within
   // scope that can be used to determine whether the attribute exists in
   // a given target.
@@ -3756,8 +3749,8 @@ static void GenerateHasAttrSpellingStringSwitch(
     }
 
     std::string TestStr = !Test.empty()
-                              ? Test + " ? " + llvm::itostr(Version) + " : 0"
-                              : llvm::itostr(Version);
+                              ? Test + " ? " + itostr(Version) + " : 0"
+                              : itostr(Version);
     if (Scope.empty() || Scope == Spelling.nameSpace())
       OS << "    .Case(\"" << Spelling.name() << "\", " << TestStr << ")\n";
   }
@@ -3780,7 +3773,7 @@ void EmitClangRegularKeywordAttributeInfo(const RecordKeeper &Records,
     if (!isRegularKeywordAttribute(S))
       continue;
     std::vector<Record *> Args = R->getValueAsListOfDefs("Args");
-    bool HasArgs = llvm::any_of(
+    bool HasArgs = any_of(
         Args, [](const Record *Arg) { return !Arg->getValueAsBit("Fake"); });
 
     OS << "KEYWORD_ATTRIBUTE("
@@ -4136,7 +4129,7 @@ static std::string CalculateDiagnostic(const Record &S) {
      // The node may contain a list of elements itself, so split the elements
      // by a comma, and trim any whitespace.
      SmallVector<StringRef, 2> Frags;
-      llvm::SplitString(V, Frags, ",");
+      SplitString(V, Frags, ",");
      for (auto Str : Frags) {
        DiagList.push_back(std::string(Str.trim()));
      }
@@ -4164,7 +4157,7 @@ static std::string CalculateDiagnostic(const Record &S) {
   // elements with a comma. This leaves the string in the state: foo, bar,
   // baz (but misses quux). We can then add ", and " for the last element
   // manually.
-  std::string Diag = llvm::join(DiagList.begin(), DiagList.end() - 1, ", ");
+  std::string Diag = join(DiagList.begin(), DiagList.end() - 1, ", ");
   return '"' + Diag + ", and " + *(DiagList.end() - 1) + '"';
 }
 
@@ -4230,12 +4223,11 @@ static void GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
   // enough statement attributes with custom subject needs to warrant
   // the implementation effort.
   std::vector<Record *> DeclSubjects, StmtSubjects;
-  llvm::copy_if(
-      Subjects, std::back_inserter(DeclSubjects), [](const Record *R) {
-        return R->isSubClassOf("SubsetSubject") || !R->isSubClassOf("StmtNode");
-      });
-  llvm::copy_if(Subjects, std::back_inserter(StmtSubjects),
-                [](const Record *R) { return R->isSubClassOf("StmtNode"); });
+  copy_if(Subjects, std::back_inserter(DeclSubjects), [](const Record *R) {
+    return R->isSubClassOf("SubsetSubject") || !R->isSubClassOf("StmtNode");
+  });
+  copy_if(Subjects, std::back_inserter(StmtSubjects),
+          [](const Record *R) { return R->isSubClassOf("StmtNode"); });
 
   // We should have sorted all of the subjects into two lists.
   // FIXME: this assertion will be wrong if we ever add type attribute subjects.
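The CalculateDiagnostic hunk above builds an Oxford-comma list by joining all but the last element and appending ", and <last>" by hand. A standalone sketch of that idiom with llvm::join (function name illustrative; a single-element guard is added for completeness):

#include "llvm/ADT/StringExtras.h"
#include <string>
#include <vector>

std::string oxfordList(const std::vector<std::string> &Items) {
  if (Items.size() == 1)
    return Items.front();
  return llvm::join(Items.begin(), Items.end() - 1, ", ") + ", and " +
         Items.back();
}

// oxfordList({"foo", "bar", "baz"}) == "foo, bar, and baz"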
@@ -4353,7 +4345,7 @@ static void GenerateMutualExclusionsChecks(const Record &Attr,
     auto IsCurAttr = [Attr](const Record *R) {
       return R->getName() == Attr.getName();
     };
-    if (llvm::any_of(MutuallyExclusiveAttrs, IsCurAttr)) {
+    if (any_of(MutuallyExclusiveAttrs, IsCurAttr)) {
       // This list of exclusions includes the attribute we're looking for, so
       // add the exclusive attributes to the proper list for checking.
       for (const Record *AttrToExclude : MutuallyExclusiveAttrs) {
@@ -4565,8 +4557,7 @@ GenerateSpellingTargetRequirements(const Record &Attr,
   Test += "((SpellingListIndex == ";
   for (unsigned Index = 0; Index < Spellings.size(); ++Index) {
-    Test +=
-        llvm::itostr(getSpellingListIndex(SpellingList, Spellings[Index]));
+    Test += itostr(getSpellingListIndex(SpellingList, Spellings[Index]));
     if (Index != Spellings.size() - 1)
       Test += " ||\n    SpellingListIndex == ";
     else
@@ -4632,8 +4623,7 @@ static void GenerateHandleDeclAttribute(const Record &Attr, raw_ostream &OS) {
 
 static bool isParamExpr(const Record *Arg) {
   return !Arg->getSuperClasses().empty() &&
-         llvm::StringSwitch<bool>(
-             Arg->getSuperClasses().back().first->getName())
+         StringSwitch<bool>(Arg->getSuperClasses().back().first->getName())
             .Case("ExprArgument", true)
            .Case("VariadicExprArgument", true)
            .Default(false);
@@ -4683,9 +4673,8 @@ void GenerateHandleAttrWithDelayedArgs(const RecordKeeper &Records,
 static bool IsKnownToGCC(const Record &Attr) {
   // Look at the spellings for this subject; if there are any spellings which
   // claim to be known to GCC, the attribute is known to GCC.
-  return llvm::any_of(
-      GetFlattenedSpellings(Attr),
-      [](const FlattenedSpelling &S) { return S.knownToGCC(); });
+  return any_of(GetFlattenedSpellings(Attr),
+                [](const FlattenedSpelling &S) { return S.knownToGCC(); });
 }
 
 /// Emits the parsed attribute helpers
@@ -4967,7 +4956,7 @@ void EmitClangAttrTextNodeDump(const RecordKeeper &Records, raw_ostream &OS) {
     // spelling used for the attribute.
 
     std::string FunctionContent;
-    llvm::raw_string_ostream SS(FunctionContent);
+    raw_string_ostream SS(FunctionContent);
 
     std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(R);
     if (Spellings.size() > 1 && !SpellingNamesAreCommon(Spellings))
@@ -5001,7 +4990,7 @@ void EmitClangAttrNodeTraverse(const RecordKeeper &Records, raw_ostream &OS) {
       continue;
 
     std::string FunctionContent;
-    llvm::raw_string_ostream SS(FunctionContent);
+    raw_string_ostream SS(FunctionContent);
 
     std::vector<Record *> Args = R.getValueAsListOfDefs("Args");
     for (const auto *Arg : Args)
@@ -5173,7 +5162,7 @@ GetAttributeHeadingAndSpellings(const Record &Documentation,
       // concatenating all the spellings. Might not be great, but better than
      // nothing.
      else if (Cat == "Undocumented")
-        Heading = llvm::join(Uniques.begin(), Uniques.end(), ", ");
+        Heading = join(Uniques.begin(), Uniques.end(), ", ");
    }
  }
 
@@ -5291,10 +5280,10 @@ void EmitClangAttrDocs(const RecordKeeper &Records, raw_ostream &OS) {
   for (auto &I : SplitDocs) {
     WriteCategoryHeader(I.first, OS);
 
-    llvm::sort(I.second,
-               [](const DocumentationData &D1, const DocumentationData &D2) {
-                 return D1.Heading < D2.Heading;
-               });
+    sort(I.second,
+         [](const DocumentationData &D1, const DocumentationData &D2) {
+           return D1.Heading < D2.Heading;
+         });
 
     // Walk over each of the attributes in the category and write out their
     // documentation.
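Several predicates in these hunks (isParamExpr and friends) rely on llvm::StringSwitch, which folds a chain of string comparisons into one expression. A minimal standalone sketch (the classifier name is illustrative):

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

bool isExprArgumentClass(llvm::StringRef ClassName) {
  return llvm::StringSwitch<bool>(ClassName)
      .Case("ExprArgument", true)
      .Case("VariadicExprArgument", true)
      .Default(false);
}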
@@ -5321,7 +5310,7 @@ void EmitTestPragmaAttributeSupportedAttributes(const RecordKeeper &Records,
         SubjectObj->getValueAsListOfDefs("Subjects");
     OS << " (";
     bool PrintComma = false;
-    for (const auto &Subject : llvm::enumerate(Subjects)) {
+    for (const auto &Subject : enumerate(Subjects)) {
       if (!isSupportedPragmaClangAttributeSubject(*Subject.value()))
         continue;
       if (PrintComma)
@@ -5334,7 +5323,7 @@ void EmitTestPragmaAttributeSupportedAttributes(const RecordKeeper &Records,
         continue;
       }
       OS << "(";
-      for (const auto &Rule : llvm::enumerate(RuleSet.getAggregateRuleSet())) {
+      for (const auto &Rule : enumerate(RuleSet.getAggregateRuleSet())) {
         if (Rule.index())
           OS << ", ";
         OS << Rule.value().getEnumValueName();
diff --git a/clang/utils/TableGen/ClangBuiltinsEmitter.cpp b/clang/utils/TableGen/ClangBuiltinsEmitter.cpp
index 4ae7600..20231ee 100644
--- a/clang/utils/TableGen/ClangBuiltinsEmitter.cpp
+++ b/clang/utils/TableGen/ClangBuiltinsEmitter.cpp
@@ -133,7 +133,7 @@ private:
     if (!T.consume_front("<"))
       PrintFatalError(Loc, "Expected '<' after '_ExtVector'");
     unsigned long long Lanes;
-    if (llvm::consumeUnsignedInteger(T, 10, Lanes))
+    if (consumeUnsignedInteger(T, 10, Lanes))
       PrintFatalError(Loc, "Expected number of lanes after '_ExtVector<'");
     Type += "E" + std::to_string(Lanes);
     if (!T.consume_front(","))
@@ -187,7 +187,7 @@ private:
   }
 
 public:
-  void Print(llvm::raw_ostream &OS) const { OS << ", \"" << Type << '\"'; }
+  void Print(raw_ostream &OS) const { OS << ", \"" << Type << '\"'; }
 
 private:
   SMLoc Loc;
@@ -208,14 +208,13 @@ public:
     }
   }
 
-  void Print(llvm::raw_ostream &OS) const { OS << HeaderName; }
+  void Print(raw_ostream &OS) const { OS << HeaderName; }
 
 private:
   std::string HeaderName;
 };
 
-void PrintAttributes(const Record *Builtin, BuiltinType BT,
-                     llvm::raw_ostream &OS) {
+void PrintAttributes(const Record *Builtin, BuiltinType BT, raw_ostream &OS) {
   OS << '\"';
   if (Builtin->isSubClassOf("LibBuiltin")) {
     if (BT == BuiltinType::LibBuiltin) {
@@ -241,7 +240,7 @@ void PrintAttributes(const Record *Builtin, BuiltinType BT,
   OS << '\"';
 }
 
-void EmitBuiltinDef(llvm::raw_ostream &OS, StringRef Substitution,
+void EmitBuiltinDef(raw_ostream &OS, StringRef Substitution,
                     const Record *Builtin, Twine Spelling, BuiltinType BT) {
   if (Builtin->getValueAsBit("RequiresUndef"))
     OS << "#undef " << Spelling << '\n';
@@ -304,14 +303,14 @@ TemplateInsts getTemplateInsts(const Record *R) {
     PrintFatalError(R->getLoc(), "Substitutions and affixes "
                                  "don't have the same lengths");
 
-  for (auto [Affix, Substitution] : llvm::zip(Affixes, Substitutions)) {
+  for (auto [Affix, Substitution] : zip(Affixes, Substitutions)) {
     temp.Substitution.emplace_back(Substitution);
     temp.Affix.emplace_back(Affix);
   }
   return temp;
 }
 
-void EmitBuiltin(llvm::raw_ostream &OS, const Record *Builtin) {
+void EmitBuiltin(raw_ostream &OS, const Record *Builtin) {
   TemplateInsts Templates = {};
   if (Builtin->isSubClassOf("Template")) {
     Templates = getTemplateInsts(Builtin);
@@ -321,7 +320,7 @@ void EmitBuiltin(llvm::raw_ostream &OS, const Record *Builtin) {
   }
 
   for (auto [Substitution, Affix] :
-       llvm::zip(Templates.Substitution, Templates.Affix)) {
+       zip(Templates.Substitution, Templates.Affix)) {
     for (StringRef Spelling : Builtin->getValueAsListOfStrings("Spellings")) {
       auto FullSpelling =
          (Templates.IsPrefix ? Affix + Spelling : Spelling + Affix).str();
@@ -345,8 +344,7 @@ void EmitBuiltin(llvm::raw_ostream &OS, const Record *Builtin) {
   }
 }
 } // namespace
 
-void clang::EmitClangBuiltins(const llvm::RecordKeeper &Records,
-                              llvm::raw_ostream &OS) {
+void clang::EmitClangBuiltins(const RecordKeeper &Records, raw_ostream &OS) {
   emitSourceFileHeader("List of builtins that Clang recognizes", OS);
 
   OS << R"c++(
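getTemplateInsts above iterates two parallel lists with llvm::zip, which walks ranges in lock-step and unpacks each element pair into structured bindings. A standalone sketch under those assumptions (names illustrative):

#include "llvm/ADT/STLExtras.h"
#include <string>
#include <vector>

std::vector<std::string> pairUp(const std::vector<std::string> &Affixes,
                                const std::vector<std::string> &Substitutions) {
  std::vector<std::string> Out;
  // Stops at the end of the shorter range; callers should validate lengths
  // first, as the emitter does with PrintFatalError.
  for (auto [Affix, Substitution] : llvm::zip(Affixes, Substitutions))
    Out.push_back(Affix + ":" + Substitution);
  return Out;
}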
diff --git a/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp b/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
index 773668c..7a8aa181 100644
--- a/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -131,12 +131,12 @@ namespace {
 };
 
 struct GroupInfo {
-  llvm::StringRef GroupName;
+  StringRef GroupName;
   std::vector<const Record*> DiagsInGroup;
   std::vector<std::string> SubGroups;
   unsigned IDNo = 0;
 
-  llvm::SmallVector<const Record *, 1> Defs;
+  SmallVector<const Record *, 1> Defs;
 
   GroupInfo() = default;
 };
@@ -213,7 +213,7 @@ static void groupDiagnostics(ArrayRef<const Record *> Diags,
       if (IsImplicit)
         continue;
 
-      llvm::SMLoc Loc = Def->getLoc().front();
+      SMLoc Loc = Def->getLoc().front();
       if (First) {
         SrcMgr.PrintMessage(Loc, SourceMgr::DK_Error,
                             Twine("group '") + Group.first +
@@ -228,7 +228,7 @@ static void groupDiagnostics(ArrayRef<const Record *> Diags,
       if (!cast<DefInit>(Diag->getValueInit("Group"))->getDef()->isAnonymous())
         continue;
 
-      llvm::SMLoc Loc = Diag->getLoc().front();
+      SMLoc Loc = Diag->getLoc().front();
       if (First) {
         SrcMgr.PrintMessage(Loc, SourceMgr::DK_Error,
                             Twine("group '") + Group.first +
@@ -247,20 +247,19 @@ static void groupDiagnostics(ArrayRef<const Record *> Diags,
 //===----------------------------------------------------------------------===//
 
 typedef std::vector<const Record *> RecordVec;
-typedef llvm::DenseSet<const Record *> RecordSet;
-typedef llvm::PointerUnion<RecordVec*, RecordSet*> VecOrSet;
+typedef DenseSet<const Record *> RecordSet;
+typedef PointerUnion<RecordVec *, RecordSet *> VecOrSet;
 
 namespace {
 class InferPedantic {
-  typedef llvm::DenseMap<const Record *,
-                         std::pair<unsigned, std::optional<unsigned>>>
+  typedef DenseMap<const Record *, std::pair<unsigned, std::optional<unsigned>>>
       GMap;
   DiagGroupParentMap &DiagGroupParents;
   ArrayRef<const Record *> Diags;
   const std::vector<const Record *> DiagGroups;
   std::map<std::string, GroupInfo> &DiagsInGroup;
-  llvm::DenseSet<const Record*> DiagsSet;
+  DenseSet<const Record *> DiagsSet;
   GMap GroupCount;
 public:
   InferPedantic(DiagGroupParentMap &DiagGroupParents,
@@ -277,8 +276,7 @@ public:
 
 private:
   /// Determine whether a group is a subgroup of another group.
-  bool isSubGroupOfGroup(const Record *Group,
-                         llvm::StringRef RootGroupName);
+  bool isSubGroupOfGroup(const Record *Group, StringRef RootGroupName);
 
   /// Determine if the diagnostic is an extension.
   bool isExtension(const Record *Diag);
@@ -295,8 +293,7 @@ private:
 };
 } // end anonymous namespace
 
-bool InferPedantic::isSubGroupOfGroup(const Record *Group,
-                                      llvm::StringRef GName) {
+bool InferPedantic::isSubGroupOfGroup(const Record *Group, StringRef GName) {
   const std::string &GroupName =
       std::string(Group->getValueAsString("GroupName"));
   if (GName == GroupName)
@@ -409,8 +406,8 @@ void InferPedantic::compute(VecOrSet DiagsInPedantic,
     const std::vector<const Record *> &Parents =
         DiagGroupParents.getParents(Group);
 
-    bool AllParentsInPedantic = llvm::all_of(
-        Parents, [&](const Record *R) { return groupInPedantic(R); });
+    bool AllParentsInPedantic =
+        all_of(Parents, [&](const Record *R) { return groupInPedantic(R); });
     // If all the parents are in -Wpedantic, this means that this diagnostic
     // group will be indirectly included by -Wpedantic already. In that
     // case, do not add it directly to -Wpedantic. If the group has no
@@ -613,11 +610,12 @@ struct DiagnosticTextBuilder {
   Piece *getSubstitution(SubstitutionPiece *S) const {
     auto It = Substitutions.find(S->Name);
     if (It == Substitutions.end())
-      PrintFatalError("Failed to find substitution with name: " + S->Name);
+      llvm::PrintFatalError("Failed to find substitution with name: " +
+                            S->Name);
     return It->second.Root;
   }
 
-  [[noreturn]] void PrintFatalError(llvm::Twine const &Msg) const {
+  [[noreturn]] void PrintFatalError(Twine const &Msg) const {
     assert(EvaluatingRecord && "not evaluating a record?");
     llvm::PrintFatalError(EvaluatingRecord->getLoc(), Msg);
   }
@@ -1022,8 +1020,8 @@ Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
                                                       StopAt Stop) {
   std::vector<Piece *> Parsed;
 
-  constexpr llvm::StringLiteral StopSets[] = {"%", "%|}", "%|}$"};
-  llvm::StringRef StopSet = StopSets[static_cast<int>(Stop)];
+  constexpr StringLiteral StopSets[] = {"%", "%|}", "%|}$"};
+  StringRef StopSet = StopSets[static_cast<int>(Stop)];
 
   while (!Text.empty()) {
     size_t End = (size_t)-2;
@@ -1050,7 +1048,7 @@ Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
       size_t ModLength = Text.find_first_of("0123456789{");
       StringRef Modifier = Text.slice(0, ModLength);
       Text = Text.slice(ModLength, StringRef::npos);
-      ModifierType ModType = llvm::StringSwitch<ModifierType>{Modifier}
+      ModifierType ModType = StringSwitch<ModifierType>{Modifier}
                                  .Case("select", MT_Select)
                                  .Case("sub", MT_Sub)
                                  .Case("diff", MT_Diff)
@@ -1227,7 +1225,7 @@ static bool isExemptAtStart(StringRef Text) {
   // OBJECT_MODE. However, if there's only a single letter other than "C", we
   // do not exempt it so that we catch a case like "A really bad idea" while
   // still allowing a case like "C does not allow...".
-  if (llvm::all_of(Text, [](char C) {
+  if (all_of(Text, [](char C) {
         return isUpper(C) || isDigit(C) || C == '+' || C == '_';
       }))
     return Text.size() > 1 || Text[0] == 'C';
@@ -1530,11 +1528,11 @@ void clang::EmitClangDiagsDefs(const RecordKeeper &Records, raw_ostream &OS,
 // Warning Group Tables generation
 //===----------------------------------------------------------------------===//
 
-static std::string getDiagCategoryEnum(llvm::StringRef name) {
+static std::string getDiagCategoryEnum(StringRef name) {
   if (name.empty())
     return "DiagCat_None";
-  SmallString<256> enumName = llvm::StringRef("DiagCat_");
-  for (llvm::StringRef::iterator I = name.begin(), E = name.end(); I != E; ++I)
+  SmallString<256> enumName = StringRef("DiagCat_");
+  for (StringRef::iterator I = name.begin(), E = name.end(); I != E; ++I)
     enumName += isalnum(*I) ? *I : '_';
   return std::string(enumName);
 }
@@ -1841,10 +1839,9 @@ void clang::EmitClangDiagsIndexName(const RecordKeeper &Records,
     Index.push_back(RecordIndexElement(R));
   }
 
-  llvm::sort(Index,
-             [](const RecordIndexElement &Lhs, const RecordIndexElement &Rhs) {
-               return Lhs.Name < Rhs.Name;
-             });
+  sort(Index, [](const RecordIndexElement &Lhs, const RecordIndexElement &Rhs) {
+    return Lhs.Name < Rhs.Name;
+  });
 
   for (unsigned i = 0, e = Index.size(); i != e; ++i) {
     const RecordIndexElement &R = Index[i];
@@ -1941,7 +1938,7 @@ void clang::EmitClangDiagDocs(const RecordKeeper &Records, raw_ostream &OS) {
 
   std::vector<const Record *> DiagGroups =
      Records.getAllDerivedDefinitions("DiagGroup");
-  llvm::sort(DiagGroups, diagGroupBeforeByName);
+  sort(DiagGroups, diagGroupBeforeByName);
 
   DiagGroupParentMap DGParentMap(Records);
 
@@ -1960,8 +1957,8 @@ void clang::EmitClangDiagDocs(const RecordKeeper &Records, raw_ostream &OS) {
                             DiagsInPedanticSet.end());
     RecordVec GroupsInPedantic(GroupsInPedanticSet.begin(),
                                GroupsInPedanticSet.end());
-    llvm::sort(DiagsInPedantic, beforeThanCompare);
-    llvm::sort(GroupsInPedantic, beforeThanCompare);
+    sort(DiagsInPedantic, beforeThanCompare);
+    sort(GroupsInPedantic, beforeThanCompare);
     PedDiags.DiagsInGroup.insert(PedDiags.DiagsInGroup.end(),
                                  DiagsInPedantic.begin(),
                                  DiagsInPedantic.end());
@@ -2012,7 +2009,7 @@ void clang::EmitClangDiagDocs(const RecordKeeper &Records, raw_ostream &OS) {
      OS << "Also controls ";
 
      bool First = true;
-      llvm::sort(GroupInfo.SubGroups);
+      sort(GroupInfo.SubGroups);
      for (const auto &Name : GroupInfo.SubGroups) {
        if (!First) OS << ", ";
        OS << "`" << (IsRemarkGroup ? "-R" : "-W") << Name << "`_";
diff --git a/clang/utils/TableGen/ClangOpcodesEmitter.cpp b/clang/utils/TableGen/ClangOpcodesEmitter.cpp
index 7e426d5..a18220f 100644
--- a/clang/utils/TableGen/ClangOpcodesEmitter.cpp
+++ b/clang/utils/TableGen/ClangOpcodesEmitter.cpp
@@ -57,7 +57,7 @@ private:
 
 void Enumerate(const Record *R, StringRef N,
                std::function<void(ArrayRef<const Record *>, Twine)> &&F) {
-  llvm::SmallVector<const Record *, 2> TypePath;
+  SmallVector<const Record *, 2> TypePath;
   const auto *Types = R->getValueAsListInit("Types");
 
   std::function<void(size_t, const Twine &)> Rec;
@@ -304,7 +304,7 @@ void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N,
   OS << "const SourceInfo &I) {\n";
 
   std::function<void(size_t, const Twine &)> Rec;
-  llvm::SmallVector<const Record *, 2> TS;
+  SmallVector<const Record *, 2> TS;
   Rec = [this, &Rec, &OS, Types, &Args, R, &TS, N,
         EmitFuncName](size_t I, const Twine &ID) {
    if (I >= Types->size()) {
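The Rec helpers in ClangOpcodesEmitter show the usual pre-C++23 pattern for a recursive lambda: declare a std::function first, then assign a lambda that captures it by reference. A reduced, illustrative sketch (not the emitter's actual logic):

#include <cstddef>
#include <functional>
#include <vector>

int sumDepthFirst(const std::vector<std::vector<int>> &Levels) {
  int Total = 0;
  std::function<void(std::size_t)> Rec;
  Rec = [&](std::size_t I) {
    if (I >= Levels.size())
      return;
    for (int V : Levels[I])
      Total += V;
    Rec(I + 1); // recurse through the captured std::function
  };
  Rec(0);
  return Total;
}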
diff --git a/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp b/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
index d68dcc4..4ce8655 100644
--- a/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
+++ b/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
@@ -378,7 +378,6 @@ void BuiltinNameEmitter::ExtractEnumTypes(ArrayRef<const Record *> Types,
       TypesSeen.insert(std::make_pair(T->getValueAsString("Name"), true));
     }
   }
-  SS.flush();
 }
 
 void BuiltinNameEmitter::EmitDeclarations() {
@@ -515,8 +514,7 @@ void BuiltinNameEmitter::GetOverloads() {
       auto Signature = B->getValueAsListOfDefs("Signature");
       // Reuse signatures to avoid unnecessary duplicates.
-      auto it =
-          llvm::find_if(SignaturesList,
+      auto it = find_if(SignaturesList,
                         [&](const std::pair<std::vector<Record *>, unsigned> &a) {
                           return a.first == Signature;
                         });
@@ -688,7 +686,7 @@ void BuiltinNameEmitter::GroupBySignature() {
     CurSignatureList->push_back(Signature.second);
   }
   // Sort the list to facilitate future comparisons.
-  llvm::sort(*CurSignatureList);
+  sort(*CurSignatureList);
 
   // Check if we have already seen another function with the same list of
   // signatures. If so, just add the name of the function.
@@ -731,7 +729,6 @@ void BuiltinNameEmitter::EmitStringMatcher() {
         raw_string_ostream SS(RetStmt);
         SS << "return std::make_pair(" << CumulativeIndex << ", " << Ovl.size()
           << ");";
-        SS.flush();
         ValidBuiltins.push_back(
            StringMatcher::StringPair(std::string(FctName), RetStmt));
       }
@@ -1278,7 +1275,7 @@ void OpenCLBuiltinHeaderEmitter::emit() {
   // Iterate over all builtins; sort to follow order of definition in .td file.
   std::vector<const Record *> Builtins =
      Records.getAllDerivedDefinitions("Builtin");
-  llvm::sort(Builtins, LessRecord());
+  sort(Builtins, LessRecord());
 
   for (const auto *B : Builtins) {
     StringRef Name = B->getValueAsString("Name");
diff --git a/clang/utils/TableGen/ClangOptionDocEmitter.cpp b/clang/utils/TableGen/ClangOptionDocEmitter.cpp
index 8c32f02..d8a467f 100644
--- a/clang/utils/TableGen/ClangOptionDocEmitter.cpp
+++ b/clang/utils/TableGen/ClangOptionDocEmitter.cpp
@@ -136,7 +136,7 @@ Documentation extractDocumentation(const RecordKeeper &Records,
   auto DocumentationForOption = [&](const Record *R) -> DocumentedOption {
     auto &A = Aliases[R];
-    llvm::sort(A, CompareByName);
+    sort(A, CompareByName);
     return {R, std::move(A)};
   };
 
@@ -145,7 +145,7 @@ Documentation extractDocumentation(const RecordKeeper &Records,
     Documentation D;
 
     auto &Groups = GroupsInGroup[R];
-    llvm::sort(Groups, CompareByLocation);
+    sort(Groups, CompareByLocation);
     for (const Record *G : Groups) {
       D.Groups.emplace_back();
       D.Groups.back().Group = G;
@@ -156,7 +156,7 @@ Documentation extractDocumentation(const RecordKeeper &Records,
     }
 
     auto &Options = OptionsInGroup[R];
-    llvm::sort(Options, CompareByName);
+    sort(Options, CompareByName);
     for (const Record *O : Options)
       if (isOptionVisible(O, DocInfo))
         D.Options.push_back(DocumentationForOption(O));
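GetOverloads above interns signatures: look for an identical one with llvm::find_if and reuse its index, otherwise append. A reduced sketch of that shape (types simplified, names illustrative):

#include "llvm/ADT/STLExtras.h"
#include <utility>
#include <vector>

unsigned internSignature(
    std::vector<std::pair<std::vector<int>, unsigned>> &List,
    const std::vector<int> &Sig) {
  auto It = llvm::find_if(
      List, [&](const auto &Entry) { return Entry.first == Sig; });
  if (It != List.end())
    return It->second;           // reuse the existing entry's index
  List.emplace_back(Sig, List.size());
  return List.back().second;
}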
diff --git a/clang/utils/TableGen/ClangSACheckersEmitter.cpp b/clang/utils/TableGen/ClangSACheckersEmitter.cpp
index 44c2d8b..998c5ed 100644
--- a/clang/utils/TableGen/ClangSACheckersEmitter.cpp
+++ b/clang/utils/TableGen/ClangSACheckersEmitter.cpp
@@ -85,7 +85,7 @@ static std::string getCheckerDocs(const Record &R) {
     return "";
 
   std::string CheckerFullName = StringRef(getCheckerFullName(&R, "-")).lower();
-  return (llvm::Twine("https://clang.llvm.org/docs/analyzer/checkers.html#") +
+  return (Twine("https://clang.llvm.org/docs/analyzer/checkers.html#") +
          CheckerFullName)
      .str();
 }
@@ -137,7 +137,7 @@ static bool isHidden(const Record *R) {
   return false;
 }
 
-static void printChecker(llvm::raw_ostream &OS, const Record &R) {
+static void printChecker(raw_ostream &OS, const Record &R) {
   OS << "CHECKER(" << "\"";
   OS.write_escaped(getCheckerFullName(&R)) << "\", ";
   OS << R.getName() << ", ";
@@ -155,8 +155,7 @@ static void printChecker(llvm::raw_ostream &OS, const Record &R) {
   OS << ")\n";
 }
 
-static void printOption(llvm::raw_ostream &OS, StringRef FullName,
-                        const Record &R) {
+static void printOption(raw_ostream &OS, StringRef FullName, const Record &R) {
   OS << "\"";
   OS.write_escaped(getCheckerOptionType(R)) << "\", \"";
   OS.write_escaped(FullName) << "\", ";
@@ -180,7 +179,7 @@ void clang::EmitClangSACheckers(const RecordKeeper &Records, raw_ostream &OS) {
   ArrayRef<const Record *> packages =
      Records.getAllDerivedDefinitions("Package");
 
-  using SortedRecords = llvm::StringMap<const Record *>;
+  using SortedRecords = StringMap<const Record *>;
 
   OS << "// This file is automatically generated. Do not edit this file by "
       "hand.\n";
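Two raw_ostream/Twine details from the checker emitter above, shown in isolation: Twine concatenates lazily and materializes once via .str(), and write_escaped() backslash-escapes quotes and non-printable characters, which matters when quoting strings into generated source. A minimal sketch (function names illustrative):

#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

std::string docUrl(llvm::StringRef Anchor) {
  return (llvm::Twine("https://clang.llvm.org/docs/analyzer/checkers.html#") +
          Anchor)
      .str(); // single allocation at the end
}

void emitQuoted(llvm::raw_ostream &OS, llvm::StringRef S) {
  OS << '"';
  OS.write_escaped(S) << '"'; // safe inside a generated string literal
}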
diff --git a/clang/utils/TableGen/ClangSyntaxEmitter.cpp b/clang/utils/TableGen/ClangSyntaxEmitter.cpp
index 66b27be..4098a5e 100644
--- a/clang/utils/TableGen/ClangSyntaxEmitter.cpp
+++ b/clang/utils/TableGen/ClangSyntaxEmitter.cpp
@@ -33,47 +33,47 @@
 #include "llvm/TableGen/Record.h"
 #include "llvm/TableGen/TableGenBackend.h"
 
+using namespace llvm;
+
 namespace {
-using llvm::formatv;
 
 // The class hierarchy of Node types.
 // We assemble this in order to be able to define the NodeKind enum in a
 // stable and useful way, where abstract Node subclasses correspond to ranges.
 class Hierarchy {
 public:
-  Hierarchy(const llvm::RecordKeeper &Records) {
-    for (const llvm::Record *T : Records.getAllDerivedDefinitions("NodeType"))
+  Hierarchy(const RecordKeeper &Records) {
+    for (const Record *T : Records.getAllDerivedDefinitions("NodeType"))
       add(T);
-    for (const llvm::Record *Derived :
-         Records.getAllDerivedDefinitions("NodeType"))
-      if (const llvm::Record *Base = Derived->getValueAsOptionalDef("base"))
+    for (const Record *Derived : Records.getAllDerivedDefinitions("NodeType"))
+      if (const Record *Base = Derived->getValueAsOptionalDef("base"))
         link(Derived, Base);
     for (NodeType &N : AllTypes) {
-      llvm::sort(N.Derived, [](const NodeType *L, const NodeType *R) {
-        return L->Record->getName() < R->Record->getName();
+      sort(N.Derived, [](const NodeType *L, const NodeType *R) {
+        return L->Rec->getName() < R->Rec->getName();
       });
       // Alternatives nodes must have subclasses, External nodes may do.
-      assert(N.Record->isSubClassOf("Alternatives") ||
-             N.Record->isSubClassOf("External") || N.Derived.empty());
-      assert(!N.Record->isSubClassOf("Alternatives") || !N.Derived.empty());
+      assert(N.Rec->isSubClassOf("Alternatives") ||
+             N.Rec->isSubClassOf("External") || N.Derived.empty());
+      assert(!N.Rec->isSubClassOf("Alternatives") || !N.Derived.empty());
     }
   }
 
   struct NodeType {
-    const llvm::Record *Record = nullptr;
+    const Record *Rec = nullptr;
     const NodeType *Base = nullptr;
     std::vector<const NodeType *> Derived;
-    llvm::StringRef name() const { return Record->getName(); }
+    StringRef name() const { return Rec->getName(); }
   };
 
-  NodeType &get(llvm::StringRef Name = "Node") {
+  NodeType &get(StringRef Name = "Node") {
     auto NI = ByName.find(Name);
     assert(NI != ByName.end() && "no such node");
     return *NI->second;
   }
 
   // Traverse the hierarchy in pre-order (base classes before derived).
-  void visit(llvm::function_ref<void(const NodeType &)> CB,
+  void visit(function_ref<void(const NodeType &)> CB,
             const NodeType *Start = nullptr) {
    if (Start == nullptr)
      Start = &get();
@@ -83,15 +83,15 @@ public:
   }
 
 private:
-  void add(const llvm::Record *R) {
+  void add(const Record *R) {
     AllTypes.emplace_back();
-    AllTypes.back().Record = R;
+    AllTypes.back().Rec = R;
     bool Inserted = ByName.try_emplace(R->getName(), &AllTypes.back()).second;
     assert(Inserted && "Duplicate node name");
     (void)Inserted;
   }
 
-  void link(const llvm::Record *Derived, const llvm::Record *Base) {
+  void link(const Record *Derived, const Record *Base) {
     auto &CN = get(Derived->getName()), &PN = get(Base->getName());
     assert(CN.Base == nullptr && "setting base twice");
     PN.Derived.push_back(&CN);
@@ -99,7 +99,7 @@ private:
   }
 
   std::deque<NodeType> AllTypes;
-  llvm::DenseMap<llvm::StringRef, NodeType *> ByName;
+  DenseMap<StringRef, NodeType *> ByName;
 };
 
 const Hierarchy::NodeType &firstConcrete(const Hierarchy::NodeType &N) {
@@ -110,7 +110,7 @@ const Hierarchy::NodeType &lastConcrete(const Hierarchy::NodeType &N) {
 }
 
 struct SyntaxConstraint {
-  SyntaxConstraint(const llvm::Record &R) {
+  SyntaxConstraint(const Record &R) {
     if (R.isSubClassOf("Optional")) {
       *this = SyntaxConstraint(*R.getValueAsDef("inner"));
     } else if (R.isSubClassOf("AnyToken")) {
@@ -128,9 +128,9 @@ struct SyntaxConstraint {
 
 } // namespace
 
-void clang::EmitClangSyntaxNodeList(const llvm::RecordKeeper &Records,
-                                    llvm::raw_ostream &OS) {
-  llvm::emitSourceFileHeader("Syntax tree node list", OS, Records);
+void clang::EmitClangSyntaxNodeList(const RecordKeeper &Records,
+                                    raw_ostream &OS) {
+  emitSourceFileHeader("Syntax tree node list", OS, Records);
   Hierarchy H(Records);
   OS << R"cpp(
 #ifndef NODE
@@ -175,21 +175,21 @@ void clang::EmitClangSyntaxNodeList(const llvm::RecordKeeper &Records,
 //   ///    widget.explode()
 // Leading and trailing whitespace lines are stripped.
 // The indentation of the first line is stripped from all lines.
-static void printDoc(llvm::StringRef Doc, llvm::raw_ostream &OS) {
+static void printDoc(StringRef Doc, raw_ostream &OS) {
   Doc = Doc.rtrim();
-  llvm::StringRef Line;
+  StringRef Line;
   while (Line.trim().empty() && !Doc.empty())
     std::tie(Line, Doc) = Doc.split('\n');
-  llvm::StringRef Indent = Line.take_while(llvm::isSpace);
+  StringRef Indent = Line.take_while(isSpace);
   for (; !Line.empty() || !Doc.empty(); std::tie(Line, Doc) = Doc.split('\n')) {
     Line.consume_front(Indent);
     OS << "/// " << Line << "\n";
   }
 }
 
-void clang::EmitClangSyntaxNodeClasses(const llvm::RecordKeeper &Records,
-                                       llvm::raw_ostream &OS) {
-  llvm::emitSourceFileHeader("Syntax tree node list", OS, Records);
+void clang::EmitClangSyntaxNodeClasses(const RecordKeeper &Records,
+                                       raw_ostream &OS) {
+  emitSourceFileHeader("Syntax tree node list", OS, Records);
   Hierarchy H(Records);
 
   OS << "\n// Forward-declare node types so we don't have to carefully "
@@ -200,9 +200,9 @@ void clang::EmitClangSyntaxNodeClasses(const llvm::RecordKeeper &Records,
   OS << "\n// Node definitions\n\n";
   H.visit([&](const Hierarchy::NodeType &N) {
-    if (N.Record->isSubClassOf("External"))
+    if (N.Rec->isSubClassOf("External"))
       return;
-    printDoc(N.Record->getValueAsString("documentation"), OS);
+    printDoc(N.Rec->getValueAsString("documentation"), OS);
     OS << formatv("class {0}{1} : public {2} {{\n", N.name(),
                   N.Derived.empty() ? " final" : "", N.Base->name());
@@ -214,11 +214,11 @@ void clang::EmitClangSyntaxNodeClasses(const llvm::RecordKeeper &Records,
     OS << formatv("protected:\n  {0}(NodeKind K) : {1}(K) {{}\npublic:\n",
                   N.name(), N.Base->name());
 
-    if (N.Record->isSubClassOf("Sequence")) {
+    if (N.Rec->isSubClassOf("Sequence")) {
       // Getters for sequence elements.
-      for (const auto &C : N.Record->getValueAsListOfDefs("children")) {
+      for (const auto &C : N.Rec->getValueAsListOfDefs("children")) {
         assert(C->isSubClassOf("Role"));
-        llvm::StringRef Role = C->getValueAsString("role");
+        StringRef Role = C->getValueAsString("role");
         SyntaxConstraint Constraint(*C->getValueAsDef("syntax"));
         for (const char *Const : {"", "const "})
           OS << formatv(
diff --git a/clang/utils/TableGen/MveEmitter.cpp b/clang/utils/TableGen/MveEmitter.cpp
index 6cfaa89..57e6353 100644
--- a/clang/utils/TableGen/MveEmitter.cpp
+++ b/clang/utils/TableGen/MveEmitter.cpp
@@ -893,8 +893,8 @@ public:
   }
   bool hasCode() const { return Code != nullptr; }
 
-  static std::string signedHexLiteral(const llvm::APInt &iOrig) {
-    llvm::APInt i = iOrig.trunc(64);
+  static std::string signedHexLiteral(const APInt &iOrig) {
+    APInt i = iOrig.trunc(64);
     SmallString<40> s;
     i.toString(s, 16, true, true);
     return std::string(s);
@@ -907,7 +907,7 @@ public:
 
     for (const auto &kv : ImmediateArgs) {
       const ImmediateArg &IA = kv.second;
 
-      llvm::APInt lo(128, 0), hi(128, 0);
+      APInt lo(128, 0), hi(128, 0);
       switch (IA.boundsType) {
       case ImmediateArg::BoundsType::ExplicitRange:
         lo = IA.i1;
@@ -915,7 +915,7 @@ public:
         break;
       case ImmediateArg::BoundsType::UInt:
         lo = 0;
-        hi = llvm::APInt::getMaxValue(IA.i1).zext(128);
+        hi = APInt::getMaxValue(IA.i1).zext(128);
         break;
       }
 
@@ -925,8 +925,8 @@ public:
       // immediate is smaller than the _possible_ range of values for
      // its type.
      unsigned ArgTypeBits = IA.ArgType->sizeInBits();
-      llvm::APInt ArgTypeRange = llvm::APInt::getMaxValue(ArgTypeBits).zext(128);
-      llvm::APInt ActualRange = (hi-lo).trunc(64).sext(128);
+      APInt ArgTypeRange = APInt::getMaxValue(ArgTypeBits).zext(128);
+      APInt ActualRange = (hi - lo).trunc(64).sext(128);
      if (ActualRange.ult(ArgTypeRange))
        SemaChecks.push_back("SemaRef.BuiltinConstantArgRange(TheCall, " +
                             Index + ", " + signedHexLiteral(lo) + ", " +
@@ -1583,7 +1583,6 @@ void EmitterBase::EmitBuiltinCG(raw_ostream &OS) {
       CodeGenParamAllocator ParamAllocPrelim{&MG.ParamTypes, &OI.ParamValues};
       raw_string_ostream OS(MG.Code);
       Int.genCode(OS, ParamAllocPrelim, 1);
-      OS.flush();
 
       MergeableGroupsPrelim[MG].insert(OI);
     }
@@ -1655,7 +1654,6 @@ void EmitterBase::EmitBuiltinCG(raw_ostream &OS) {
                                       &ParamNumbers};
       raw_string_ostream OS(MG.Code);
       Int->genCode(OS, ParamAlloc, 2);
-      OS.flush();
 
       MergeableGroups[MG].insert(OI);
     }
@@ -456,7 +456,7 @@ public: /// Return true if the intrinsic takes an immediate operand. bool hasImmediate() const { - return llvm::any_of(Types, [](const Type &T) { return T.isImmediate(); }); + return any_of(Types, [](const Type &T) { return T.isImmediate(); }); } // Return if the supplied argument is an immediate @@ -1320,8 +1320,8 @@ void Intrinsic::emitShadowedArgs() { } bool Intrinsic::protoHasScalar() const { - return llvm::any_of( - Types, [](const Type &T) { return T.isScalar() && !T.isImmediate(); }); + return any_of(Types, + [](const Type &T) { return T.isScalar() && !T.isImmediate(); }); } void Intrinsic::emitBodyAsBuiltinCall() { @@ -1964,7 +1964,7 @@ Intrinsic &NeonEmitter::getIntrinsic(StringRef Name, ArrayRef<Type> Types, continue; unsigned ArgNum = 0; - bool MatchingArgumentTypes = llvm::all_of(Types, [&](const auto &Type) { + bool MatchingArgumentTypes = all_of(Types, [&](const auto &Type) { return Type == I.getParamType(ArgNum++); }); @@ -2022,7 +2022,7 @@ void NeonEmitter::createIntrinsic(const Record *R, } } - llvm::sort(NewTypeSpecs); + sort(NewTypeSpecs); NewTypeSpecs.erase(std::unique(NewTypeSpecs.begin(), NewTypeSpecs.end()), NewTypeSpecs.end()); auto &Entry = IntrinsicMap[Name]; @@ -2155,9 +2155,7 @@ void NeonEmitter::genOverloadTypeCheckCode(raw_ostream &OS, } if (Mask) { - std::string Name = Def->getMangledName(); - OverloadMap.insert(std::make_pair(Name, OverloadInfo())); - OverloadInfo &OI = OverloadMap[Name]; + OverloadInfo &OI = OverloadMap[Def->getMangledName()]; OI.Mask |= Mask; OI.PtrArgNum |= PtrArgNum; OI.HasConstPtr = HasConstPtr; @@ -2406,7 +2404,7 @@ void NeonEmitter::run(raw_ostream &OS) { for (auto *I : Defs) I->indexBody(); - llvm::stable_sort(Defs, llvm::deref<std::less<>>()); + stable_sort(Defs, deref<std::less<>>()); // Only emit a def when its requirements have been met. // FIXME: This loop could be made faster, but it's fast enough for now. @@ -2419,7 +2417,7 @@ void NeonEmitter::run(raw_ostream &OS) { I != Defs.end(); /*No step*/) { bool DependenciesSatisfied = true; for (auto *II : (*I)->getDependencies()) { - if (llvm::is_contained(Defs, II)) + if (is_contained(Defs, II)) DependenciesSatisfied = false; } if (!DependenciesSatisfied) { @@ -2513,7 +2511,7 @@ void NeonEmitter::runFP16(raw_ostream &OS) { for (auto *I : Defs) I->indexBody(); - llvm::stable_sort(Defs, llvm::deref<std::less<>>()); + stable_sort(Defs, deref<std::less<>>()); // Only emit a def when its requirements have been met. // FIXME: This loop could be made faster, but it's fast enough for now. @@ -2526,7 +2524,7 @@ void NeonEmitter::runFP16(raw_ostream &OS) { I != Defs.end(); /*No step*/) { bool DependenciesSatisfied = true; for (auto *II : (*I)->getDependencies()) { - if (llvm::is_contained(Defs, II)) + if (is_contained(Defs, II)) DependenciesSatisfied = false; } if (!DependenciesSatisfied) { @@ -2621,7 +2619,7 @@ void NeonEmitter::runBF16(raw_ostream &OS) { for (auto *I : Defs) I->indexBody(); - llvm::stable_sort(Defs, llvm::deref<std::less<>>()); + stable_sort(Defs, deref<std::less<>>()); // Only emit a def when its requirements have been met. // FIXME: This loop could be made faster, but it's fast enough for now. 
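For context on the loop these hunks keep revisiting: run, runFP16 and runBF16 all drain Defs with the dependency-driven sweep that the FIXME above describes, emitting an intrinsic only once none of its dependencies is still pending in Defs. A rough standalone sketch of that pattern, with a hypothetical Def type standing in for the real intrinsic class (and assuming, as the real emitter does, that the dependency graph is acyclic):

#include "llvm/ADT/STLExtras.h"
#include <vector>

struct Def {
  std::vector<Def *> Deps;
  const std::vector<Def *> &getDependencies() const { return Deps; }
};

template <typename EmitFn>
void emitInDependencyOrder(std::vector<Def *> Defs, EmitFn Emit) {
  while (!Defs.empty()) {
    for (auto I = Defs.begin(); I != Defs.end(); /*no step*/) {
      // A def is ready once none of its dependencies still waits in Defs.
      bool Ready = llvm::none_of((*I)->getDependencies(), [&](Def *D) {
        return llvm::is_contained(Defs, D);
      });
      if (!Ready) {
        ++I;
        continue;
      }
      Emit(*I);
      I = Defs.erase(I); // quadratic overall, which is what the FIXME notes
    }
  }
}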
@@ -2634,7 +2632,7 @@ void NeonEmitter::runBF16(raw_ostream &OS) { I != Defs.end(); /*No step*/) { bool DependenciesSatisfied = true; for (auto *II : (*I)->getDependencies()) { - if (llvm::is_contained(Defs, II)) + if (is_contained(Defs, II)) DependenciesSatisfied = false; } if (!DependenciesSatisfied) { diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp index 4ef83e7..de03aad 100644 --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -502,8 +502,8 @@ void RVVEmitter::createCodeGen(raw_ostream &OS) { std::vector<std::unique_ptr<RVVIntrinsic>> Defs; createRVVIntrinsics(Defs); // IR name could be empty, use the stable sort preserves the relative order. - llvm::stable_sort(Defs, [](const std::unique_ptr<RVVIntrinsic> &A, - const std::unique_ptr<RVVIntrinsic> &B) { + stable_sort(Defs, [](const std::unique_ptr<RVVIntrinsic> &A, + const std::unique_ptr<RVVIntrinsic> &B) { if (A->getIRName() == B->getIRName()) return (A->getPolicyAttrs() < B->getPolicyAttrs()); return (A->getIRName() < B->getIRName()); @@ -606,7 +606,7 @@ void RVVEmitter::createRVVIntrinsics( BasicPrototype, /*IsMasked=*/false, /*HasMaskedOffOperand=*/false, HasVL, NF, UnMaskedPolicyScheme, DefaultPolicy, IsTuple); - llvm::SmallVector<PrototypeDescriptor> MaskedPrototype; + SmallVector<PrototypeDescriptor> MaskedPrototype; if (HasMasked) MaskedPrototype = RVVIntrinsic::computeBuiltinTypes( BasicPrototype, /*IsMasked=*/true, HasMaskedOffOperand, HasVL, NF, diff --git a/clang/utils/TableGen/SveEmitter.cpp b/clang/utils/TableGen/SveEmitter.cpp index 5abf6fc..2f9747e 100644 --- a/clang/utils/TableGen/SveEmitter.cpp +++ b/clang/utils/TableGen/SveEmitter.cpp @@ -206,7 +206,7 @@ public: ArrayRef<SVEType> getTypes() const { return Types; } SVEType getParamType(unsigned I) const { return Types[I + 1]; } unsigned getNumParams() const { - return Proto.size() - (2 * llvm::count(Proto, '.')) - 1; + return Proto.size() - (2 * count(Proto, '.')) - 1; } uint64_t getFlags() const { return Flags; } @@ -281,11 +281,11 @@ private: static const std::array<ReinterpretTypeInfo, 12> Reinterprets; const RecordKeeper &Records; - llvm::StringMap<uint64_t> EltTypes; - llvm::StringMap<uint64_t> MemEltTypes; - llvm::StringMap<uint64_t> FlagTypes; - llvm::StringMap<uint64_t> MergeTypes; - llvm::StringMap<uint64_t> ImmCheckTypes; + StringMap<uint64_t> EltTypes; + StringMap<uint64_t> MemEltTypes; + StringMap<uint64_t> FlagTypes; + StringMap<uint64_t> MergeTypes; + StringMap<uint64_t> ImmCheckTypes; public: SVEEmitter(const RecordKeeper &R) : Records(R) { @@ -322,7 +322,7 @@ public: auto It = FlagTypes.find(MaskName); if (It != FlagTypes.end()) { uint64_t Mask = It->getValue(); - unsigned Shift = llvm::countr_zero(Mask); + unsigned Shift = countr_zero(Mask); assert(Shift < 64 && "Mask value produced an invalid shift value"); return (V << Shift) & Mask; } @@ -1187,7 +1187,7 @@ void SVEEmitter::createIntrinsic( } // Remove duplicate type specs. - llvm::sort(TypeSpecs); + sort(TypeSpecs); TypeSpecs.erase(std::unique(TypeSpecs.begin(), TypeSpecs.end()), TypeSpecs.end()); @@ -1433,8 +1433,8 @@ void SVEEmitter::createBuiltins(raw_ostream &OS) { createIntrinsic(R, Defs); // The mappings must be sorted based on BuiltinID. 
- llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A, - const std::unique_ptr<Intrinsic> &B) { + sort(Defs, [](const std::unique_ptr<Intrinsic> &A, + const std::unique_ptr<Intrinsic> &B) { return A->getMangledName() < B->getMangledName(); }); @@ -1475,8 +1475,8 @@ void SVEEmitter::createCodeGenMap(raw_ostream &OS) { createIntrinsic(R, Defs); // The mappings must be sorted based on BuiltinID. - llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A, - const std::unique_ptr<Intrinsic> &B) { + sort(Defs, [](const std::unique_ptr<Intrinsic> &A, + const std::unique_ptr<Intrinsic> &B) { return A->getMangledName() < B->getMangledName(); }); @@ -1508,12 +1508,11 @@ void SVEEmitter::createRangeChecks(raw_ostream &OS) { createIntrinsic(R, Defs); // The mappings must be sorted based on BuiltinID. - llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A, - const std::unique_ptr<Intrinsic> &B) { + sort(Defs, [](const std::unique_ptr<Intrinsic> &A, + const std::unique_ptr<Intrinsic> &B) { return A->getMangledName() < B->getMangledName(); }); - OS << "#ifdef GET_SVE_IMMEDIATE_CHECK\n"; // Ensure these are only emitted once. @@ -1641,8 +1640,8 @@ void SVEEmitter::createSMEBuiltins(raw_ostream &OS) { } // The mappings must be sorted based on BuiltinID. - llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A, - const std::unique_ptr<Intrinsic> &B) { + sort(Defs, [](const std::unique_ptr<Intrinsic> &A, + const std::unique_ptr<Intrinsic> &B) { return A->getMangledName() < B->getMangledName(); }); @@ -1669,8 +1668,8 @@ void SVEEmitter::createSMECodeGenMap(raw_ostream &OS) { } // The mappings must be sorted based on BuiltinID. - llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A, - const std::unique_ptr<Intrinsic> &B) { + sort(Defs, [](const std::unique_ptr<Intrinsic> &A, + const std::unique_ptr<Intrinsic> &B) { return A->getMangledName() < B->getMangledName(); }); @@ -1703,12 +1702,11 @@ void SVEEmitter::createSMERangeChecks(raw_ostream &OS) { } // The mappings must be sorted based on BuiltinID. - llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A, - const std::unique_ptr<Intrinsic> &B) { + sort(Defs, [](const std::unique_ptr<Intrinsic> &A, + const std::unique_ptr<Intrinsic> &B) { return A->getMangledName() < B->getMangledName(); }); - OS << "#ifdef GET_SME_IMMEDIATE_CHECK\n"; // Ensure these are only emitted once. @@ -1790,7 +1788,7 @@ void SVEEmitter::createStreamingAttrs(raw_ostream &OS, ACLEKind Kind) { OS << "#ifdef GET_" << ExtensionKind << "_STREAMING_ATTRS\n"; - llvm::StringMap<std::set<std::string>> StreamingMap; + StringMap<std::set<std::string>> StreamingMap; uint64_t IsStreamingFlag = getEnumValueForFlag("IsStreaming"); uint64_t VerifyRuntimeMode = getEnumValueForFlag("VerifyRuntimeMode"); diff --git a/compiler-rt/lib/asan/asan_posix.cpp b/compiler-rt/lib/asan/asan_posix.cpp index cd57f75..c42c047 100644 --- a/compiler-rt/lib/asan/asan_posix.cpp +++ b/compiler-rt/lib/asan/asan_posix.cpp @@ -59,10 +59,10 @@ bool PlatformUnpoisonStacks() { // Since we're on the signal alternate stack, we cannot find the DEFAULT // stack bottom using a local variable. 
- uptr stack_begin, stack_end, tls_begin, tls_end; - GetThreadStackAndTls(/*main=*/false, &stack_begin, &stack_end, &tls_begin, - &tls_end); - UnpoisonStack(stack_begin, stack_end, "default"); + uptr default_bottom, tls_addr, tls_size, stack_size; + GetThreadStackAndTls(/*main=*/false, &default_bottom, &stack_size, &tls_addr, + &tls_size); + UnpoisonStack(default_bottom, default_bottom + stack_size, "default"); return true; } @@ -171,7 +171,11 @@ static void AfterFork(bool fork_child) { } void InstallAtForkHandler() { -# if SANITIZER_SOLARIS || SANITIZER_NETBSD || SANITIZER_APPLE +# if SANITIZER_SOLARIS || SANITIZER_NETBSD || SANITIZER_APPLE || \ + (SANITIZER_LINUX && SANITIZER_SPARC) + // While other Linux targets use clone in internal_fork which doesn't + // trigger pthread_atfork handlers, Linux/sparc64 uses __fork, causing a + // hang. return; // FIXME: Implement FutexWait. # endif pthread_atfork( diff --git a/compiler-rt/lib/asan/asan_rtl.cpp b/compiler-rt/lib/asan/asan_rtl.cpp index a390802..d42a75e 100644 --- a/compiler-rt/lib/asan/asan_rtl.cpp +++ b/compiler-rt/lib/asan/asan_rtl.cpp @@ -580,8 +580,10 @@ static void UnpoisonDefaultStack() { } else { CHECK(!SANITIZER_FUCHSIA); // If we haven't seen this thread, try asking the OS for stack bounds. - uptr tls_begin, tls_end; - GetThreadStackAndTls(/*main=*/false, &bottom, &top, &tls_begin, &tls_end); + uptr tls_addr, tls_size, stack_size; + GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr, + &tls_size); + top = bottom + stack_size; } UnpoisonStack(bottom, top, "default"); diff --git a/compiler-rt/lib/asan/asan_thread.cpp b/compiler-rt/lib/asan/asan_thread.cpp index c1a804b..c79c33a 100644 --- a/compiler-rt/lib/asan/asan_thread.cpp +++ b/compiler-rt/lib/asan/asan_thread.cpp @@ -306,10 +306,13 @@ AsanThread *CreateMainThread() { // OS-specific implementations that need more information passed through. 
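Every per-tool change in this stretch of the patch is the same mechanical adaptation: GetThreadStackAndTls now reports (address, size) pairs instead of (begin, end) pairs, and each caller rebuilds the end pointers by addition, as the AsanThread, DFsanThread, HWASan and LSan call sites just below all do. The conversion in isolation, as a sketch (only the __sanitizer::uptr integer type is assumed; the declaration mirrors the updated one in sanitizer_common.h):

typedef unsigned long uptr; // stand-in for __sanitizer::uptr

// Updated contract: each range comes back as a base address plus a size.
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

void InitThreadRanges(bool main) {
  uptr stack_bottom = 0, stack_size = 0, tls_begin = 0, tls_size = 0;
  GetThreadStackAndTls(main, &stack_bottom, &stack_size, &tls_begin,
                       &tls_size);
  // The old begin/end form is recovered by addition; an absent range is now
  // size == 0 rather than begin == end.
  uptr stack_top = stack_bottom + stack_size;
  uptr tls_end = tls_begin + tls_size;
  (void)stack_top;
  (void)tls_end;
}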
void AsanThread::SetThreadStackAndTls(const InitOptions *options) { DCHECK_EQ(options, nullptr); - GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_top_, - &tls_begin_, &tls_end_); - stack_top_ = RoundDownTo(stack_top_, ASAN_SHADOW_GRANULARITY); + uptr tls_size = 0; + uptr stack_size = 0; + GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size, + &tls_begin_, &tls_size); + stack_top_ = RoundDownTo(stack_bottom_ + stack_size, ASAN_SHADOW_GRANULARITY); stack_bottom_ = RoundDownTo(stack_bottom_, ASAN_SHADOW_GRANULARITY); + tls_end_ = tls_begin_ + tls_size; dtls_ = DTLS_Get(); if (stack_top_ != stack_bottom_) { diff --git a/compiler-rt/lib/dfsan/dfsan_thread.cpp b/compiler-rt/lib/dfsan/dfsan_thread.cpp index 55d3891..c1d4751 100644 --- a/compiler-rt/lib/dfsan/dfsan_thread.cpp +++ b/compiler-rt/lib/dfsan/dfsan_thread.cpp @@ -21,8 +21,13 @@ DFsanThread *DFsanThread::Create(thread_callback_t start_routine, void *arg, } void DFsanThread::SetThreadStackAndTls() { - GetThreadStackAndTls(IsMainThread(), &stack_.bottom, &stack_.top, &tls_begin_, - &tls_end_); + uptr tls_size = 0; + uptr stack_size = 0; + GetThreadStackAndTls(IsMainThread(), &stack_.bottom, &stack_size, &tls_begin_, + &tls_size); + stack_.top = stack_.bottom + stack_size; + tls_end_ = tls_begin_ + tls_size; + int local; CHECK(AddrIsInStack((uptr)&local)); } diff --git a/compiler-rt/lib/hwasan/hwasan_linux.cpp b/compiler-rt/lib/hwasan/hwasan_linux.cpp index d174fb8..68294b5 100644 --- a/compiler-rt/lib/hwasan/hwasan_linux.cpp +++ b/compiler-rt/lib/hwasan/hwasan_linux.cpp @@ -499,8 +499,12 @@ void HwasanOnDeadlySignal(int signo, void *info, void *context) { } void Thread::InitStackAndTls(const InitState *) { - GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_top_, &tls_begin_, - &tls_end_); + uptr tls_size; + uptr stack_size; + GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_, + &tls_size); + stack_top_ = stack_bottom_ + stack_size; + tls_end_ = tls_begin_ + tls_size; } uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) { diff --git a/compiler-rt/lib/lsan/lsan_posix.cpp b/compiler-rt/lib/lsan/lsan_posix.cpp index ddd9fee..422c29a 100644 --- a/compiler-rt/lib/lsan/lsan_posix.cpp +++ b/compiler-rt/lib/lsan/lsan_posix.cpp @@ -50,8 +50,12 @@ void ThreadContext::OnStarted(void *arg) { void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type) { OnStartedArgs args; - GetThreadStackAndTls(tid == kMainTid, &args.stack_begin, &args.stack_end, - &args.tls_begin, &args.tls_end); + uptr stack_size = 0; + uptr tls_size = 0; + GetThreadStackAndTls(tid == kMainTid, &args.stack_begin, &stack_size, + &args.tls_begin, &tls_size); + args.stack_end = args.stack_begin + stack_size; + args.tls_end = args.tls_begin + tls_size; GetAllocatorCacheRange(&args.cache_begin, &args.cache_end); args.dtls = DTLS_Get(); ThreadContextLsanBase::ThreadStart(tid, os_id, thread_type, &args); diff --git a/compiler-rt/lib/memprof/memprof_thread.cpp b/compiler-rt/lib/memprof/memprof_thread.cpp index 50072bb..e2bca9b 100644 --- a/compiler-rt/lib/memprof/memprof_thread.cpp +++ b/compiler-rt/lib/memprof/memprof_thread.cpp @@ -168,8 +168,12 @@ MemprofThread *CreateMainThread() { // OS-specific implementations that need more information passed through. 
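One subtlety in the AsanThread hunk above: stack_top_ is derived from bottom + size first and only then rounded down to the shadow granularity, together with stack_bottom_. A tiny self-contained illustration of that RoundDownTo-style alignment (hypothetical values; the helper matches the sanitizer one in spirit, and 8 as the ASan shadow granularity is an assumption here). MemprofThread just below applies the same bottom + size derivation, without the rounding:

typedef unsigned long uptr;

// Round x down to a multiple of align (align must be a power of two).
static uptr RoundDownTo(uptr x, uptr align) { return x & ~(align - 1); }

int main() {
  const uptr kShadowGranularity = 8; // assumed ASAN_SHADOW_GRANULARITY
  uptr stack_bottom = 0x7ffd12345679;
  uptr stack_size = 0x21000;
  uptr stack_top = RoundDownTo(stack_bottom + stack_size, kShadowGranularity);
  stack_bottom = RoundDownTo(stack_bottom, kShadowGranularity);
  return stack_top > stack_bottom ? 0 : 1; // non-empty range expected
}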
void MemprofThread::SetThreadStackAndTls(const InitOptions *options) { DCHECK_EQ(options, nullptr); - GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_top_, - &tls_begin_, &tls_end_); + uptr tls_size = 0; + uptr stack_size = 0; + GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size, + &tls_begin_, &tls_size); + stack_top_ = stack_bottom_ + stack_size; + tls_end_ = tls_begin_ + tls_size; dtls_ = DTLS_Get(); if (stack_top_ != stack_bottom_) { diff --git a/compiler-rt/lib/msan/msan_thread.cpp b/compiler-rt/lib/msan/msan_thread.cpp index 1a1725f..e5bdedc 100644 --- a/compiler-rt/lib/msan/msan_thread.cpp +++ b/compiler-rt/lib/msan/msan_thread.cpp @@ -20,8 +20,13 @@ MsanThread *MsanThread::Create(thread_callback_t start_routine, } void MsanThread::SetThreadStackAndTls() { - GetThreadStackAndTls(IsMainThread(), &stack_.bottom, &stack_.top, &tls_begin_, - &tls_end_); + uptr tls_size = 0; + uptr stack_size = 0; + GetThreadStackAndTls(IsMainThread(), &stack_.bottom, &stack_size, &tls_begin_, + &tls_size); + stack_.top = stack_.bottom + stack_size; + tls_end_ = tls_begin_ + tls_size; + int local; CHECK(AddrIsInStack((uptr)&local)); } diff --git a/compiler-rt/lib/nsan/nsan_thread.cpp b/compiler-rt/lib/nsan/nsan_thread.cpp index 6662c9b..85706ae 100644 --- a/compiler-rt/lib/nsan/nsan_thread.cpp +++ b/compiler-rt/lib/nsan/nsan_thread.cpp @@ -29,8 +29,13 @@ NsanThread *NsanThread::Create(thread_callback_t start_routine, void *arg) { } void NsanThread::SetThreadStackAndTls() { - GetThreadStackAndTls(IsMainThread(), &stack_.bottom, &stack_.top, &tls_begin_, - &tls_end_); + uptr tls_size = 0; + uptr stack_size = 0; + GetThreadStackAndTls(IsMainThread(), &stack_.bottom, &stack_size, &tls_begin_, + &tls_size); + stack_.top = stack_.bottom + stack_size; + tls_end_ = tls_begin_ + tls_size; + int local; CHECK(AddrIsInStack((uptr)&local)); } diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_dlsym.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_dlsym.h index 92b1373..b360478 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_dlsym.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_dlsym.h @@ -15,6 +15,8 @@ #define SANITIZER_ALLOCATOR_DLSYM_H #include "sanitizer_allocator_internal.h" +#include "sanitizer_common/sanitizer_allocator_checks.h" +#include "sanitizer_common/sanitizer_internal_defs.h" namespace __sanitizer { @@ -31,15 +33,15 @@ struct DlSymAllocator { UNLIKELY(internal_allocator()->FromPrimary(ptr)); } - static void *Allocate(uptr size_in_bytes) { - void *ptr = InternalAlloc(size_in_bytes, nullptr, kWordSize); + static void *Allocate(uptr size_in_bytes, uptr align = kWordSize) { + void *ptr = InternalAlloc(size_in_bytes, nullptr, align); CHECK(internal_allocator()->FromPrimary(ptr)); Details::OnAllocate(ptr, internal_allocator()->GetActuallyAllocatedSize(ptr)); return ptr; } - static void *Callocate(SIZE_T nmemb, SIZE_T size) { + static void *Callocate(usize nmemb, usize size) { void *ptr = InternalCalloc(nmemb, size); CHECK(internal_allocator()->FromPrimary(ptr)); Details::OnAllocate(ptr, @@ -70,6 +72,11 @@ struct DlSymAllocator { return new_ptr; } + static void *ReallocArray(void *ptr, uptr count, uptr size) { + CHECK(!CheckForCallocOverflow(count, size)); + return Realloc(ptr, count * size); + } + static void OnAllocate(const void *ptr, uptr size) {} static void OnFree(const void *ptr, uptr size) {} }; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h index 
082d215..182dc8f 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h @@ -83,8 +83,8 @@ int TgKill(pid_t pid, tid_t tid, int sig); uptr GetThreadSelf(); void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, uptr *stack_bottom); -void GetThreadStackAndTls(bool main, uptr *stk_begin, uptr *stk_end, - uptr *tls_begin, uptr *tls_end); +void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, + uptr *tls_addr, uptr *tls_size); // Memory management void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false); diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp index 67e77a8..7d88575 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp @@ -20,13 +20,13 @@ namespace __sanitizer { // The Windows implementations of these functions use the win32 API directly, // bypassing libc. #if !SANITIZER_WINDOWS -#if SANITIZER_LINUX +# if SANITIZER_LINUX void LogMessageOnPrintf(const char *str) {} -#endif +# endif void WriteToSyslog(const char *buffer) {} void Abort() { internal__exit(1); } bool CreateDir(const char *pathname) { return false; } -#endif // !SANITIZER_WINDOWS +#endif // !SANITIZER_WINDOWS #if !SANITIZER_WINDOWS && !SANITIZER_APPLE void ListOfModules::init() {} diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp index f0e1e3d..271c92e 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp @@ -32,7 +32,7 @@ void LibIgnore::AddIgnoredLibrary(const char *name_templ) { lib->templ = internal_strdup(name_templ); lib->name = nullptr; lib->real_name = nullptr; - lib->loaded = false; + lib->range_id = kInvalidCodeRangeId; } void LibIgnore::OnLibraryLoaded(const char *name) { @@ -43,7 +43,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) { buf[0]) { for (uptr i = 0; i < count_; i++) { Lib *lib = &libs_[i]; - if (!lib->loaded && (!lib->real_name) && + if (!lib->loaded() && (!lib->real_name) && TemplateMatch(lib->templ, name)) lib->real_name = internal_strdup(buf.data()); } @@ -70,28 +70,31 @@ void LibIgnore::OnLibraryLoaded(const char *name) { Die(); } loaded = true; - if (lib->loaded) + if (lib->loaded()) continue; VReport(1, "Matched called_from_lib suppression '%s' against library" " '%s'\n", lib->templ, mod.full_name()); - lib->loaded = true; lib->name = internal_strdup(mod.full_name()); const uptr idx = atomic_load(&ignored_ranges_count_, memory_order_relaxed); CHECK_LT(idx, ARRAY_SIZE(ignored_code_ranges_)); - ignored_code_ranges_[idx].begin = range.beg; - ignored_code_ranges_[idx].end = range.end; + ignored_code_ranges_[idx].OnLoad(range.beg, range.end); + // Record the index of the ignored range. + lib->range_id = idx; atomic_store(&ignored_ranges_count_, idx + 1, memory_order_release); break; } } - if (lib->loaded && !loaded) { - Report("%s: library '%s' that was matched against called_from_lib" - " suppression '%s' is unloaded\n", - SanitizerToolName, lib->name, lib->templ); - Die(); + if (lib->loaded() && !loaded) { + VReport(1, + "%s: library '%s' that was matched against called_from_lib" + " suppression '%s' is unloaded\n", + SanitizerToolName, lib->name, lib->templ); + // The library is unloaded so mark the ignored code range as unloaded. 
+ ignored_code_ranges_[lib->range_id].OnUnload(); + lib->range_id = kInvalidCodeRangeId; } } @@ -110,8 +113,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) { const uptr idx = atomic_load(&instrumented_ranges_count_, memory_order_relaxed); CHECK_LT(idx, ARRAY_SIZE(instrumented_code_ranges_)); - instrumented_code_ranges_[idx].begin = range.beg; - instrumented_code_ranges_[idx].end = range.end; + instrumented_code_ranges_[idx].OnLoad(range.beg, range.end); atomic_store(&instrumented_ranges_count_, idx + 1, memory_order_release); } diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_libignore.h b/compiler-rt/lib/sanitizer_common/sanitizer_libignore.h index 18e4d83..0e26ff4 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_libignore.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_libignore.h @@ -49,25 +49,36 @@ class LibIgnore { bool IsPcInstrumented(uptr pc) const; private: + static const uptr kMaxIgnoredRanges = 128; + static const uptr kMaxInstrumentedRanges = 1024; + static const uptr kMaxLibs = 1024; + static const uptr kInvalidCodeRangeId = -1; + struct Lib { char *templ; char *name; char *real_name; // target of symlink - bool loaded; + uptr range_id; + bool loaded() const { return range_id != kInvalidCodeRangeId; }; }; struct LibCodeRange { - uptr begin; - uptr end; - }; + bool IsInRange(uptr pc) const { + return (pc >= begin && pc < atomic_load(&end, memory_order_acquire)); + } - inline bool IsInRange(uptr pc, const LibCodeRange &range) const { - return (pc >= range.begin && pc < range.end); - } + void OnLoad(uptr b, uptr e) { + begin = b; + atomic_store(&end, e, memory_order_release); + } - static const uptr kMaxIgnoredRanges = 128; - static const uptr kMaxInstrumentedRanges = 1024; - static const uptr kMaxLibs = 1024; + void OnUnload() { atomic_store(&end, 0, memory_order_release); } + + private: + uptr begin; + // A value of 0 means the associated module was unloaded. 
+ atomic_uintptr_t end; + }; // Hot part: atomic_uintptr_t ignored_ranges_count_; @@ -90,7 +101,7 @@ class LibIgnore { inline bool LibIgnore::IsIgnored(uptr pc, bool *pc_in_ignored_lib) const { const uptr n = atomic_load(&ignored_ranges_count_, memory_order_acquire); for (uptr i = 0; i < n; i++) { - if (IsInRange(pc, ignored_code_ranges_[i])) { + if (ignored_code_ranges_[i].IsInRange(pc)) { *pc_in_ignored_lib = true; return true; } @@ -104,7 +115,7 @@ inline bool LibIgnore::IsIgnored(uptr pc, bool *pc_in_ignored_lib) const { inline bool LibIgnore::IsPcInstrumented(uptr pc) const { const uptr n = atomic_load(&instrumented_ranges_count_, memory_order_acquire); for (uptr i = 0; i < n; i++) { - if (IsInRange(pc, instrumented_code_ranges_[i])) + if (instrumented_code_ranges_[i].IsInRange(pc)) return true; } return false; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp index 071ecc4..55889ae 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp @@ -234,20 +234,15 @@ void InitTlsSize() { # if defined(__aarch64__) || defined(__x86_64__) || \ defined(__powerpc64__) || defined(__loongarch__) - void *get_tls_static_info = dlsym(RTLD_DEFAULT, "_dl_get_tls_static_info"); + void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info"); size_t tls_align; ((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align); # endif } -# else -void InitTlsSize() {} -# endif // SANITIZER_GLIBC && !SANITIZER_GO // On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage // of g_tls_size. On other targets, ThreadDescriptorSize() is only used by lsan // to get the pointer to thread-specific data keys in the thread control block. -# if (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS) && \ - !SANITIZER_ANDROID && !SANITIZER_GO // sizeof(struct pthread) from glibc. static atomic_uintptr_t thread_descriptor_size; @@ -465,8 +460,9 @@ __attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size, *addr = ranges[l].begin; *size = ranges[r - 1].end - ranges[l].begin; } -# endif // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD || - // SANITIZER_LINUX) && !SANITIZER_ANDROID && !SANITIZER_GO +# else +void InitTlsSize() {} +# endif // SANITIZER_GLIBC && !SANITIZER_GO # if SANITIZER_NETBSD static struct tls_tcb *ThreadSelfTlsTcb() { @@ -626,33 +622,25 @@ uptr GetTlsSize() { } # endif -void GetThreadStackAndTls(bool main, uptr *stk_begin, uptr *stk_end, - uptr *tls_begin, uptr *tls_end) { +void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, + uptr *tls_addr, uptr *tls_size) { # if SANITIZER_GO // Stub implementation for Go. - *stk_begin = 0; - *stk_end = 0; - *tls_begin = 0; - *tls_end = 0; + *stk_addr = *stk_size = *tls_addr = *tls_size = 0; # else - uptr tls_addr = 0; - uptr tls_size = 0; - GetTls(&tls_addr, &tls_size); - *tls_begin = tls_addr; - *tls_end = tls_addr + tls_size; + GetTls(tls_addr, tls_size); uptr stack_top, stack_bottom; GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); - *stk_begin = stack_bottom; - *stk_end = stack_top; + *stk_addr = stack_bottom; + *stk_size = stack_top - stack_bottom; if (!main) { // If stack and tls intersect, make them non-intersecting. 
- CHECK_GE(*tls_begin, *stk_begin); - if (*tls_begin > *stk_begin && *tls_begin < *stk_end) { - if (*stk_end > *tls_end) - *tls_end = *stk_end; - *stk_end = *tls_begin; + if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) { + if (*stk_addr + *stk_size < *tls_addr + *tls_size) + *tls_size = *stk_addr + *stk_size - *tls_addr; + *stk_size = *tls_addr - *stk_addr; } } # endif diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp index b4a5d68..35717c6 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp @@ -572,18 +572,21 @@ uptr TlsSize() { #endif } -void GetThreadStackAndTls(bool main, uptr *stk_begin, uptr *stk_end, - uptr *tls_begin, uptr *tls_end) { -# if !SANITIZER_GO - GetThreadStackTopAndBottom(main, stk_begin, stk_end); - *tls_begin = TlsBaseAddr(); - *tls_end = *tls_begin + TlsSize(); -# else - *stk_begin = 0; - *stk_end = 0; - *tls_begin = 0; - *tls_end = 0; -# endif +void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, + uptr *tls_addr, uptr *tls_size) { +#if !SANITIZER_GO + uptr stack_top, stack_bottom; + GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); + *stk_addr = stack_bottom; + *stk_size = stack_top - stack_bottom; + *tls_addr = TlsBaseAddr(); + *tls_size = TlsSize(); +#else + *stk_addr = 0; + *stk_size = 0; + *tls_addr = 0; + *tls_size = 0; +#endif } void ListOfModules::init() { diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp index a17a148..087bd80 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp @@ -130,7 +130,6 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res, DTLS::DTV *dtv = DTLS_Find(dso_id); if (!dtv || dtv->beg) return nullptr; - CHECK_LE(static_tls_begin, static_tls_end); uptr tls_size = 0; uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset - kDtvOffset; VReport(2, diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp index d8f51bf..8a80d54 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp @@ -876,18 +876,21 @@ uptr GetTlsSize() { void InitTlsSize() { } -void GetThreadStackAndTls(bool main, uptr *stk_begin, uptr *stk_end, - uptr *tls_begin, uptr *tls_end) { -# if SANITIZER_GO - *stk_begin = 0; - *stk_end = 0; - *tls_begin = 0; - *tls_end = 0; -# else - GetThreadStackTopAndBottom(main, stk_begin, stk_end); - *tls_begin = 0; - *tls_end = 0; -# endif +void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, + uptr *tls_addr, uptr *tls_size) { +#if SANITIZER_GO + *stk_addr = 0; + *stk_size = 0; + *tls_addr = 0; + *tls_size = 0; +#else + uptr stack_top, stack_bottom; + GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); + *stk_addr = stack_bottom; + *stk_size = stack_top - stack_bottom; + *tls_addr = 0; + *tls_size = 0; +#endif } void ReportFile::Write(const char *buffer, uptr length) { diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_common_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_common_test.cpp index 7fd6bad..918d824 100644 --- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_common_test.cpp +++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_common_test.cpp @@ -204,29 +204,30 @@ TEST(SanitizerCommon, 
InternalMmapVectorSwap) { } void TestThreadInfo(bool main) { - uptr stk_begin = 0; - uptr stk_end = 0; - uptr tls_begin = 0; - uptr tls_end = 0; - GetThreadStackAndTls(main, &stk_begin, &stk_end, &tls_begin, &tls_end); + uptr stk_addr = 0; + uptr stk_size = 0; + uptr tls_addr = 0; + uptr tls_size = 0; + GetThreadStackAndTls(main, &stk_addr, &stk_size, &tls_addr, &tls_size); int stack_var; - EXPECT_NE(stk_begin, (uptr)0); - EXPECT_GT(stk_end, stk_begin); - EXPECT_GT((uptr)&stack_var, stk_begin); - EXPECT_LT((uptr)&stack_var, stk_end); + EXPECT_NE(stk_addr, (uptr)0); + EXPECT_NE(stk_size, (uptr)0); + EXPECT_GT((uptr)&stack_var, stk_addr); + EXPECT_LT((uptr)&stack_var, stk_addr + stk_size); #if SANITIZER_LINUX && defined(__x86_64__) static __thread int thread_var; - EXPECT_NE(tls_begin, (uptr)0); - EXPECT_GT(tls_end, tls_begin); - EXPECT_GT((uptr)&thread_var, tls_begin); - EXPECT_LT((uptr)&thread_var, tls_end); + EXPECT_NE(tls_addr, (uptr)0); + EXPECT_NE(tls_size, (uptr)0); + EXPECT_GT((uptr)&thread_var, tls_addr); + EXPECT_LT((uptr)&thread_var, tls_addr + tls_size); // Ensure that tls and stack do not intersect. - EXPECT_TRUE(tls_begin < stk_begin || tls_begin >= stk_end); - EXPECT_TRUE(tls_end < stk_begin || tls_end >= stk_end); - EXPECT_TRUE((tls_begin < stk_begin) == (tls_end < stk_begin)); + uptr tls_end = tls_addr + tls_size; + EXPECT_TRUE(tls_addr < stk_addr || tls_addr >= stk_addr + stk_size); + EXPECT_TRUE(tls_end < stk_addr || tls_end >= stk_addr + stk_size); + EXPECT_TRUE((tls_addr < stk_addr) == (tls_end < stk_addr)); #endif } diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_linux_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_linux_test.cpp index 025cba9..338c4d3 100644 --- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_linux_test.cpp +++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_linux_test.cpp @@ -13,18 +13,17 @@ #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_LINUX -#include "sanitizer_common/sanitizer_linux.h" +# include <pthread.h> +# include <sched.h> +# include <stdlib.h> -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_file.h" -#include "gtest/gtest.h" +# include <algorithm> +# include <vector> -#include <pthread.h> -#include <sched.h> -#include <stdlib.h> - -#include <algorithm> -#include <vector> +# include "gtest/gtest.h" +# include "sanitizer_common/sanitizer_common.h" +# include "sanitizer_common/sanitizer_file.h" +# include "sanitizer_common/sanitizer_linux.h" namespace __sanitizer { @@ -109,9 +108,8 @@ void *TidReporterThread(void *argument) { void ThreadListerTest::SpawnTidReporter(pthread_t *pthread_id, tid_t *tid) { pthread_mutex_lock(&thread_arg.tid_reported_mutex); thread_arg.reported_tid = -1; - ASSERT_EQ(0, pthread_create(pthread_id, NULL, - TidReporterThread, - &thread_arg)); + ASSERT_EQ(0, + pthread_create(pthread_id, NULL, TidReporterThread, &thread_arg)); while (thread_arg.reported_tid == (tid_t)(-1)) pthread_cond_wait(&thread_arg.tid_reported_cond, &thread_arg.tid_reported_mutex); @@ -129,8 +127,8 @@ static std::vector<tid_t> ReadTidsToVector(ThreadLister *thread_lister) { static bool Includes(std::vector<tid_t> first, std::vector<tid_t> second) { std::sort(first.begin(), first.end()); std::sort(second.begin(), second.end()); - return std::includes(first.begin(), first.end(), - second.begin(), second.end()); + return std::includes(first.begin(), first.end(), second.begin(), + second.end()); } static bool HasElement(const std::vector<tid_t> &vector, tid_t element) { 
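The SpawnTidReporter helper reformatted above is the standard pthread condition-variable handshake: the parent resets the shared slot under the mutex, spawns the child, and loops in pthread_cond_wait until the child has published its tid. A condensed, self-contained version of that handshake (hypothetical names, error handling omitted):

#include <pthread.h>

static pthread_mutex_t g_mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t g_cv = PTHREAD_COND_INITIALIZER;
static long g_reported_tid = -1;

static void *Reporter(void *) {
  pthread_mutex_lock(&g_mu);
  g_reported_tid = 42; // the real test publishes the OS thread id here
  pthread_cond_signal(&g_cv);
  pthread_mutex_unlock(&g_mu);
  return nullptr;
}

static long SpawnAndWait() {
  pthread_mutex_lock(&g_mu);
  g_reported_tid = -1;
  pthread_t t;
  pthread_create(&t, nullptr, Reporter, nullptr);
  while (g_reported_tid == -1) // the loop guards against spurious wakeups
    pthread_cond_wait(&g_cv, &g_mu);
  pthread_mutex_unlock(&g_mu);
  pthread_join(t, nullptr);
  return g_reported_tid;
}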
@@ -187,7 +185,7 @@ TEST(SanitizerCommon, SetEnvTest) { EXPECT_EQ(0, getenv(kEnvName)); } -#if (defined(__x86_64__) || defined(__i386__)) && !SANITIZER_ANDROID +# if (defined(__x86_64__) || defined(__i386__)) && !SANITIZER_ANDROID // libpthread puts the thread descriptor at the end of stack space. void *thread_descriptor_size_test_func(void *arg) { uptr descr_addr = (uptr)pthread_self(); @@ -206,48 +204,48 @@ TEST(SanitizerLinux, ThreadDescriptorSize) { ASSERT_EQ(0, pthread_join(tid, &result)); EXPECT_EQ((uptr)result, ThreadDescriptorSize()); } -#endif +# endif TEST(SanitizerCommon, LibraryNameIs) { EXPECT_FALSE(LibraryNameIs("", "")); char full_name[256]; - const char *paths[] = { "", "/", "/path/to/" }; - const char *suffixes[] = { "", "-linux", ".1.2", "-linux.1.2" }; - const char *base_names[] = { "lib", "lib.0", "lib-i386" }; - const char *wrong_names[] = { "", "lib.9", "lib-x86_64" }; + const char *paths[] = {"", "/", "/path/to/"}; + const char *suffixes[] = {"", "-linux", ".1.2", "-linux.1.2"}; + const char *base_names[] = {"lib", "lib.0", "lib-i386"}; + const char *wrong_names[] = {"", "lib.9", "lib-x86_64"}; for (uptr i = 0; i < ARRAY_SIZE(paths); i++) for (uptr j = 0; j < ARRAY_SIZE(suffixes); j++) { for (uptr k = 0; k < ARRAY_SIZE(base_names); k++) { internal_snprintf(full_name, ARRAY_SIZE(full_name), "%s%s%s.so", paths[i], base_names[k], suffixes[j]); EXPECT_TRUE(LibraryNameIs(full_name, base_names[k])) - << "Full name " << full_name - << " doesn't match base name " << base_names[k]; + << "Full name " << full_name << " doesn't match base name " + << base_names[k]; for (uptr m = 0; m < ARRAY_SIZE(wrong_names); m++) EXPECT_FALSE(LibraryNameIs(full_name, wrong_names[m])) - << "Full name " << full_name - << " matches base name " << wrong_names[m]; + << "Full name " << full_name << " matches base name " + << wrong_names[m]; } } } -#if defined(__mips64) +# if defined(__mips64) // Effectively, this is a test for ThreadDescriptorSize() which is used to // compute ThreadSelf(). 
TEST(SanitizerLinux, ThreadSelfTest) { ASSERT_EQ(pthread_self(), ThreadSelf()); } -#endif +# endif TEST(SanitizerCommon, StartSubprocessTest) { int pipe_fds[2]; ASSERT_EQ(0, pipe(pipe_fds)); -#if SANITIZER_ANDROID +# if SANITIZER_ANDROID const char *shell = "/system/bin/sh"; -#else +# else const char *shell = "/bin/sh"; -#endif +# endif const char *argv[] = {shell, "-c", "echo -n 'hello'", (char *)NULL}; int pid = StartSubprocess(shell, argv, GetEnviron(), /* stdin */ kInvalidFd, /* stdout */ pipe_fds[1]); diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp index 1be65bc..460cbac 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp @@ -678,12 +678,12 @@ TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) { return user_memalign(thr, pc, align, sz); } -TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) { +TSAN_INTERCEPTOR(void *, calloc, uptr n, uptr size) { if (in_symbolizer()) - return InternalCalloc(size, n); + return InternalCalloc(n, size); void *p = 0; { - SCOPED_INTERCEPTOR_RAW(calloc, size, n); + SCOPED_INTERCEPTOR_RAW(calloc, n, size); p = user_calloc(thr, pc, size, n); } invoke_malloc_hook(p, n * size); @@ -703,13 +703,13 @@ TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) { return p; } -TSAN_INTERCEPTOR(void*, reallocarray, void *p, uptr size, uptr n) { +TSAN_INTERCEPTOR(void *, reallocarray, void *p, uptr n, uptr size) { if (in_symbolizer()) - return InternalReallocArray(p, size, n); + return InternalReallocArray(p, n, size); if (p) invoke_free_hook(p); { - SCOPED_INTERCEPTOR_RAW(reallocarray, p, size, n); + SCOPED_INTERCEPTOR_RAW(reallocarray, p, n, size); p = user_reallocarray(thr, pc, p, size, n); } invoke_malloc_hook(p, size); diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp index 0705365..0ea83fb 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp @@ -252,7 +252,7 @@ void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) { if (AllocatorMayReturnNull()) return SetErrnoOnNull(nullptr); GET_STACK_TRACE_FATAL(thr, pc); - ReportReallocArrayOverflow(size, n, &stack); + ReportReallocArrayOverflow(n, size, &stack); } return user_realloc(thr, pc, p, size * n); } diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp index 8d29e25..5316a78 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp @@ -165,16 +165,14 @@ void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id, #endif uptr stk_addr = 0; - uptr stk_end = 0; + uptr stk_size = 0; uptr tls_addr = 0; - uptr tls_end = 0; + uptr tls_size = 0; #if !SANITIZER_GO if (thread_type != ThreadType::Fiber) - GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_end, &tls_addr, - &tls_end); + GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_size, &tls_addr, + &tls_size); #endif - uptr stk_size = stk_end - stk_addr; - uptr tls_size = tls_end - tls_addr; thr->stk_addr = stk_addr; thr->stk_size = stk_size; thr->tls_addr = tls_addr; diff --git a/compiler-rt/test/asan/TestCases/global-overflow.cpp b/compiler-rt/test/asan/TestCases/global-overflow.cpp index df44d67..ed276ca 100644 --- a/compiler-rt/test/asan/TestCases/global-overflow.cpp +++ b/compiler-rt/test/asan/TestCases/global-overflow.cpp @@ -3,6 +3,9 @@ // RUN: %clangxx_asan -O2 %s -o %t && not %run %t 2>&1 | FileCheck 
%s // RUN: %clangxx_asan -O3 %s -o %t && not %run %t 2>&1 | FileCheck %s +// Issue #108194: Incomplete .debug_line at -O1 and above. +// XFAIL: target={{.*sparc.*}} + #include <string.h> int main(int argc, char **argv) { static char XXX[10]; diff --git a/compiler-rt/test/asan/TestCases/large_func_test.cpp b/compiler-rt/test/asan/TestCases/large_func_test.cpp index 37fec8b..c64fc7d 100644 --- a/compiler-rt/test/asan/TestCases/large_func_test.cpp +++ b/compiler-rt/test/asan/TestCases/large_func_test.cpp @@ -4,6 +4,9 @@ // RUN: %clangxx_asan -O3 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK-%os --check-prefix=CHECK // REQUIRES: stable-runtime +// Issue #108194: Incomplete .debug_line at -O1 and above. +// XFAIL: target={{.*sparc.*}} + #include <stdlib.h> __attribute__((noinline)) static void LargeFunction(int *x, int zero) { diff --git a/compiler-rt/test/sanitizer_common/TestCases/Linux/tls_malloc_hook.c b/compiler-rt/test/sanitizer_common/TestCases/Linux/tls_malloc_hook.c deleted file mode 100644 index 587f3b1..0000000 --- a/compiler-rt/test/sanitizer_common/TestCases/Linux/tls_malloc_hook.c +++ /dev/null @@ -1,56 +0,0 @@ -// Test that we don't crash accessing DTLS from malloc hook. - -// RUN: %clang %s -o %t -// RUN: %clang %s -DBUILD_SO -fPIC -o %t-so.so -shared -// RUN: %run %t 2>&1 | FileCheck %s - -// REQUIRES: glibc - -// No allocator and hooks. -// XFAIL: ubsan - -#ifndef BUILD_SO -# include <assert.h> -# include <dlfcn.h> -# include <pthread.h> -# include <stdio.h> -# include <stdlib.h> - -typedef long *(*get_t)(); -get_t GetTls; -void *Thread(void *unused) { return GetTls(); } - -__thread long recursive_hook; - -// CHECK: __sanitizer_malloc_hook: -void __sanitizer_malloc_hook(const volatile void *ptr, size_t sz) - __attribute__((disable_sanitizer_instrumentation)) { - ++recursive_hook; - if (recursive_hook == 1 && GetTls) - fprintf(stderr, "__sanitizer_malloc_hook: %p\n", GetTls()); - --recursive_hook; -} - -int main(int argc, char *argv[]) { - char path[4096]; - snprintf(path, sizeof(path), "%s-so.so", argv[0]); - int i; - - void *handle = dlopen(path, RTLD_LAZY); - if (!handle) - fprintf(stderr, "%s\n", dlerror()); - assert(handle != 0); - GetTls = (get_t)dlsym(handle, "GetTls"); - assert(dlerror() == 0); - - pthread_t t; - pthread_create(&t, 0, Thread, 0); - pthread_join(t, 0); - pthread_create(&t, 0, Thread, 0); - pthread_join(t, 0); - return 0; -} -#else // BUILD_SO -__thread long huge_thread_local_array[1 << 17]; -long *GetTls() { return &huge_thread_local_array[0]; } -#endif diff --git a/compiler-rt/test/sanitizer_common/TestCases/sanitizer_coverage_trace_pc_guard-dso.cpp b/compiler-rt/test/sanitizer_common/TestCases/sanitizer_coverage_trace_pc_guard-dso.cpp index 9a27bc8..f6ccbb6 100644 --- a/compiler-rt/test/sanitizer_common/TestCases/sanitizer_coverage_trace_pc_guard-dso.cpp +++ b/compiler-rt/test/sanitizer_common/TestCases/sanitizer_coverage_trace_pc_guard-dso.cpp @@ -1,7 +1,8 @@ // Tests trace pc guard coverage collection. // REQUIRES: has_sancovcc -// UNSUPPORTED: ubsan,target={{(powerpc64|s390x|thumb).*}} +// Doesn't work on big-endian targets. 
+// UNSUPPORTED: ubsan,target={{(powerpc64|s390x|sparc|thumb).*}} // XFAIL: tsan,darwin // XFAIL: android && asan diff --git a/compiler-rt/test/sanitizer_common/TestCases/sanitizer_coverage_trace_pc_guard.cpp b/compiler-rt/test/sanitizer_common/TestCases/sanitizer_coverage_trace_pc_guard.cpp index b4b4914..84c28e8 100644 --- a/compiler-rt/test/sanitizer_common/TestCases/sanitizer_coverage_trace_pc_guard.cpp +++ b/compiler-rt/test/sanitizer_common/TestCases/sanitizer_coverage_trace_pc_guard.cpp @@ -1,7 +1,7 @@ // Tests trace pc guard coverage collection. // REQUIRES: has_sancovcc -// UNSUPPORTED: ubsan,i386-darwin,target={{(powerpc64|s390x|thumb).*}} +// UNSUPPORTED: ubsan,i386-darwin,target={{(powerpc64|s390x|sparc|thumb).*}} // This test is failing for lsan on darwin on x86_64h. // UNSUPPORTED: x86_64h-darwin && lsan // XFAIL: tsan diff --git a/compiler-rt/test/tsan/ignore_lib3.cpp b/compiler-rt/test/tsan/ignore_lib3.cpp index b1a3940d..a919a3e 100644 --- a/compiler-rt/test/tsan/ignore_lib3.cpp +++ b/compiler-rt/test/tsan/ignore_lib3.cpp @@ -3,10 +3,10 @@ // RUN: %clangxx_tsan -O1 %s -DLIB -fPIC -fno-sanitize=thread -shared -o %t-dir/libignore_lib3.so // RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -o %t-dir/executable -// RUN: %env_tsan_opts=suppressions='%s.supp' %deflake %run %t-dir/executable | FileCheck %s +// RUN: %env_tsan_opts=suppressions='%s.supp':verbosity=1 %run %t-dir/executable 2>&1 | FileCheck %s // Tests that unloading of a library matched against called_from_lib suppression -// causes program crash (this is not supported). +// is supported. // Some aarch64 kernels do not support non executable write pages // REQUIRES: stable-runtime @@ -22,18 +22,30 @@ int main(int argc, char **argv) { std::string lib = std::string(dirname(argv[0])) + "/libignore_lib3.so"; - void *h = dlopen(lib.c_str(), RTLD_GLOBAL | RTLD_NOW); - dlclose(h); + void *h; + void (*f)(); + // Try opening, closing and reopening the ignored lib. + for (unsigned int k = 0; k < 2; k++) { + h = dlopen(lib.c_str(), RTLD_GLOBAL | RTLD_NOW); + if (h == 0) + exit(printf("failed to load the library (%d)\n", errno)); + f = (void (*)())dlsym(h, "libfunc"); + if (f == 0) + exit(printf("failed to find the func (%d)\n", errno)); + f(); + dlclose(h); + } fprintf(stderr, "OK\n"); } #else // #ifdef LIB -extern "C" void libfunc() { -} +# include "ignore_lib_lib.h" #endif // #ifdef LIB -// CHECK: ThreadSanitizer: library {{.*}} that was matched against called_from_lib suppression 'ignore_lib3.so' is unloaded -// CHECK-NOT: OK - +// CHECK: Matched called_from_lib suppression 'ignore_lib3.so' +// CHECK: library '{{.*}}ignore_lib3.so' that was matched against called_from_lib suppression 'ignore_lib3.so' is unloaded +// CHECK: Matched called_from_lib suppression 'ignore_lib3.so' +// CHECK: library '{{.*}}ignore_lib3.so' that was matched against called_from_lib suppression 'ignore_lib3.so' is unloaded +// CHECK: OK diff --git a/flang/include/flang/Optimizer/Builder/FIRBuilder.h b/flang/include/flang/Optimizer/Builder/FIRBuilder.h index f7151f2..180e2c8a 100644 --- a/flang/include/flang/Optimizer/Builder/FIRBuilder.h +++ b/flang/include/flang/Optimizer/Builder/FIRBuilder.h @@ -29,6 +29,7 @@ #include <utility> namespace mlir { +class DataLayout; class SymbolTable; } @@ -253,6 +254,15 @@ public: mlir::ValueRange lenParams = {}, llvm::ArrayRef<mlir::NamedAttribute> attrs = {}); + /// Create an LLVM stack save intrinsic op. Returns the saved stack pointer. 
+ /// The stack address space is fetched from the data layout of the current + /// module. + mlir::Value genStackSave(mlir::Location loc); + + /// Create an LLVM stack restore intrinsic op. stackPointer should be a value + /// previously returned from genStackSave. + void genStackRestore(mlir::Location loc, mlir::Value stackPointer); + /// Create a global value. fir::GlobalOp createGlobal(mlir::Location loc, mlir::Type type, llvm::StringRef name, @@ -523,6 +533,9 @@ public: setCommonAttributes(op); } + /// Construct a data layout on demand and return it + mlir::DataLayout &getDataLayout(); + private: /// Set attributes (e.g. FastMathAttr) to \p op operation /// based on the current attributes setting. @@ -537,6 +550,11 @@ private: /// fir::GlobalOp and func::FuncOp symbol table to speed-up /// lookups. mlir::SymbolTable *symbolTable = nullptr; + + /// DataLayout constructed on demand. Access via getDataLayout(). + /// Stored via a unique_ptr rather than an optional so as not to bloat this + /// class when most instances won't ever need a data layout. + std::unique_ptr<mlir::DataLayout> dataLayout = nullptr; }; } // namespace fir @@ -729,6 +747,9 @@ elideExtentsAlreadyInType(mlir::Type type, mlir::ValueRange shape); llvm::SmallVector<mlir::Value> elideLengthsAlreadyInType(mlir::Type type, mlir::ValueRange lenParams); +/// Get the address space which should be used for allocas +uint64_t getAllocaAddressSpace(mlir::DataLayout *dataLayout); + } // namespace fir::factory #endif // FORTRAN_OPTIMIZER_BUILDER_FIRBUILDER_H diff --git a/flang/include/flang/Optimizer/Builder/LowLevelIntrinsics.h b/flang/include/flang/Optimizer/Builder/LowLevelIntrinsics.h index e5a7113..9be0516 100644 --- a/flang/include/flang/Optimizer/Builder/LowLevelIntrinsics.h +++ b/flang/include/flang/Optimizer/Builder/LowLevelIntrinsics.h @@ -42,12 +42,6 @@ mlir::func::FuncOp getLlvmGetRounding(FirOpBuilder &builder); /// Get the `llvm.set.rounding` intrinsic. mlir::func::FuncOp getLlvmSetRounding(FirOpBuilder &builder); -/// Get the `llvm.stacksave` intrinsic. -mlir::func::FuncOp getLlvmStackSave(FirOpBuilder &builder); - -/// Get the `llvm.stackrestore` intrinsic. -mlir::func::FuncOp getLlvmStackRestore(FirOpBuilder &builder); - /// Get the `llvm.init.trampoline` intrinsic. mlir::func::FuncOp getLlvmInitTrampoline(FirOpBuilder &builder); diff --git a/flang/include/flang/Optimizer/Support/DataLayout.h b/flang/include/flang/Optimizer/Support/DataLayout.h index d21576b..6072425 100644 --- a/flang/include/flang/Optimizer/Support/DataLayout.h +++ b/flang/include/flang/Optimizer/Support/DataLayout.h @@ -45,7 +45,6 @@ void setMLIRDataLayoutFromAttributes(mlir::ModuleOp mlirModule, /// std::nullopt. std::optional<mlir::DataLayout> getOrSetDataLayout(mlir::ModuleOp mlirModule, bool allowDefaultLayout = false); - } // namespace fir::support #endif // FORTRAN_OPTIMIZER_SUPPORT_DATALAYOUT_H diff --git a/flang/include/flang/Runtime/CUDA/memory.h b/flang/include/flang/Runtime/CUDA/memory.h new file mode 100644 index 0000000..8fd5112 --- /dev/null +++ b/flang/include/flang/Runtime/CUDA/memory.h @@ -0,0 +1,47 @@ +//===-- include/flang/Runtime/CUDA/memory.h ---------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef FORTRAN_RUNTIME_CUDA_MEMORY_H_ +#define FORTRAN_RUNTIME_CUDA_MEMORY_H_ + +#include "flang/Runtime/descriptor.h" +#include "flang/Runtime/entry-names.h" +#include <cstddef> + +static constexpr unsigned kHostToDevice = 0; +static constexpr unsigned kDeviceToHost = 1; +static constexpr unsigned kDeviceToDevice = 2; + +namespace Fortran::runtime::cuda { + +extern "C" { + +/// Set the value of the data held by a descriptor. The \p value pointer must +/// be addressable for the number of bytes specified by the element size of +/// the descriptor \p desc. +void RTDECL(CUFMemsetDescriptor)(const Descriptor &desc, void *value, + const char *sourceFile = nullptr, int sourceLine = 0); + +/// Data transfer from a pointer to a descriptor. +void RTDECL(CUFDataTransferDescPtr)(const Descriptor &dst, void *src, + std::size_t bytes, unsigned mode, const char *sourceFile = nullptr, + int sourceLine = 0); + +/// Data transfer from a descriptor to a pointer. +void RTDECL(CUFDataTransferPtrDesc)(void *dst, const Descriptor &src, + std::size_t bytes, unsigned mode, const char *sourceFile = nullptr, + int sourceLine = 0); + +/// Data transfer from a descriptor to a descriptor. +void RTDECL(CUFDataTransferDescDesc)(const Descriptor &dst, + const Descriptor &src, unsigned mode, const char *sourceFile = nullptr, + int sourceLine = 0); + +} // extern "C" +} // namespace Fortran::runtime::cuda +#endif // FORTRAN_RUNTIME_CUDA_MEMORY_H_ diff --git a/flang/lib/Frontend/TextDiagnosticPrinter.cpp b/flang/lib/Frontend/TextDiagnosticPrinter.cpp index 1e6414f..8b00fb6 100644 --- a/flang/lib/Frontend/TextDiagnosticPrinter.cpp +++ b/flang/lib/Frontend/TextDiagnosticPrinter.cpp @@ -38,8 +38,8 @@ TextDiagnosticPrinter::~TextDiagnosticPrinter() {} static void printRemarkOption(llvm::raw_ostream &os, clang::DiagnosticsEngine::Level level, const clang::Diagnostic &info) { - llvm::StringRef opt = - clang::DiagnosticIDs::getWarningOptionForDiag(info.getID()); + llvm::StringRef opt = info.getDiags()->getDiagnosticIDs() + ->getWarningOptionForDiag(info.getID()); if (!opt.empty()) { // We still need to check if the level is a Remark since an unknown option // warning could be printed, i.e. 
[-Wunknown-warning-option] diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp index 79e5a04..ebcb761 100644 --- a/flang/lib/Lower/Bridge.cpp +++ b/flang/lib/Lower/Bridge.cpp @@ -3257,15 +3257,10 @@ private: const Fortran::parser::CharBlock &endPosition = eval.getLastNestedEvaluation().position; localSymbols.pushScope(); - mlir::func::FuncOp stackSave = fir::factory::getLlvmStackSave(*builder); - mlir::func::FuncOp stackRestore = - fir::factory::getLlvmStackRestore(*builder); - mlir::Value stackPtr = - builder->create<fir::CallOp>(toLocation(), stackSave).getResult(0); + mlir::Value stackPtr = builder->genStackSave(toLocation()); mlir::Location endLoc = genLocation(endPosition); - stmtCtx.attachCleanup([=]() { - builder->create<fir::CallOp>(endLoc, stackRestore, stackPtr); - }); + stmtCtx.attachCleanup( + [=]() { builder->genStackRestore(endLoc, stackPtr); }); Fortran::semantics::Scope &scope = bridge.getSemanticsContext().FindScope(endPosition); scopeBlockIdMap.try_emplace(&scope, ++blockId); diff --git a/flang/lib/Lower/ConvertCall.cpp b/flang/lib/Lower/ConvertCall.cpp index f445a21..2fedc01 100644 --- a/flang/lib/Lower/ConvertCall.cpp +++ b/flang/lib/Lower/ConvertCall.cpp @@ -368,22 +368,9 @@ std::pair<fir::ExtendedValue, bool> Fortran::lower::genCallOpAndResult( if (!extents.empty() || !lengths.empty()) { auto *bldr = &converter.getFirOpBuilder(); - auto stackSaveFn = fir::factory::getLlvmStackSave(builder); - auto stackSaveSymbol = bldr->getSymbolRefAttr(stackSaveFn.getName()); - mlir::Value sp; - fir::CallOp call = bldr->create<fir::CallOp>( - loc, stackSaveSymbol, stackSaveFn.getFunctionType().getResults(), - mlir::ValueRange{}); - if (call.getNumResults() != 0) - sp = call.getResult(0); - stmtCtx.attachCleanup([bldr, loc, sp]() { - auto stackRestoreFn = fir::factory::getLlvmStackRestore(*bldr); - auto stackRestoreSymbol = - bldr->getSymbolRefAttr(stackRestoreFn.getName()); - bldr->create<fir::CallOp>(loc, stackRestoreSymbol, - stackRestoreFn.getFunctionType().getResults(), - mlir::ValueRange{sp}); - }); + mlir::Value sp = bldr->genStackSave(loc); + stmtCtx.attachCleanup( + [bldr, loc, sp]() { bldr->genStackRestore(loc, sp); }); } mlir::Value temp = builder.createTemporary(loc, type, ".result", extents, resultLengths); @@ -1206,10 +1193,26 @@ static PreparedDummyArgument preparePresentUserCallActualArgument( // is set (descriptors must be created with the actual type in this case, and // copy-in/copy-out should be driven by the contiguity with regard to the // actual type). - if (ignoreTKRtype) - dummyTypeWithActualRank = fir::changeElementType( - dummyTypeWithActualRank, actual.getFortranElementType(), - actual.isPolymorphic()); + if (ignoreTKRtype) { + if (auto boxCharType = + mlir::dyn_cast<fir::BoxCharType>(dummyTypeWithActualRank)) { + auto maybeActualCharType = + mlir::dyn_cast<fir::CharacterType>(actual.getFortranElementType()); + if (!maybeActualCharType || + maybeActualCharType.getFKind() != boxCharType.getKind()) { + // When passing to a fir.boxchar with ignore(tk), prepare the argument + // as if only the raw address must be passed. + dummyTypeWithActualRank = + fir::ReferenceType::get(actual.getElementOrSequenceType()); + } + // Otherwise, the actual is already a character with the same kind as the + // dummy and can be passed normally. 
+ } else { + dummyTypeWithActualRank = fir::changeElementType( + dummyTypeWithActualRank, actual.getFortranElementType(), + actual.isPolymorphic()); + } + } PreparedDummyArgument preparedDummy; diff --git a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp index fa8a430..e9ef857 100644 --- a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp +++ b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp @@ -1009,6 +1009,37 @@ bool ClauseProcessor::processMap( return clauseFound; } +bool ClauseProcessor::processMotionClauses(lower::StatementContext &stmtCtx, + mlir::omp::MapClauseOps &result) { + std::map<const semantics::Symbol *, + llvm::SmallVector<OmpMapMemberIndicesData>> + parentMemberIndices; + llvm::SmallVector<const semantics::Symbol *> mapSymbols; + + auto callbackFn = [&](const auto &clause, const parser::CharBlock &source) { + mlir::Location clauseLocation = converter.genLocation(source); + + // TODO Support motion modifiers: present, mapper, iterator. + constexpr llvm::omp::OpenMPOffloadMappingFlags mapTypeBits = + std::is_same_v<llvm::remove_cvref_t<decltype(clause)>, omp::clause::To> + ? llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO + : llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM; + + processMapObjects(stmtCtx, clauseLocation, std::get<ObjectList>(clause.t), + mapTypeBits, parentMemberIndices, result.mapVars, + &mapSymbols); + }; + + bool clauseFound = findRepeatableClause<omp::clause::To>(callbackFn); + clauseFound = + findRepeatableClause<omp::clause::From>(callbackFn) || clauseFound; + + insertChildMapInfoIntoParent(converter, parentMemberIndices, result.mapVars, + mapSymbols, + /*mapSymTypes=*/nullptr, /*mapSymLocs=*/nullptr); + return clauseFound; +} + bool ClauseProcessor::processNontemporal( mlir::omp::NontemporalClauseOps &result) const { return findRepeatableClause<omp::clause::Nontemporal>( diff --git a/flang/lib/Lower/OpenMP/ClauseProcessor.h b/flang/lib/Lower/OpenMP/ClauseProcessor.h index be1d8a6..0c8e7bd 100644 --- a/flang/lib/Lower/OpenMP/ClauseProcessor.h +++ b/flang/lib/Lower/OpenMP/ClauseProcessor.h @@ -121,6 +121,8 @@ public: llvm::SmallVectorImpl<const semantics::Symbol *> *mapSyms = nullptr, llvm::SmallVectorImpl<mlir::Location> *mapSymLocs = nullptr, llvm::SmallVectorImpl<mlir::Type> *mapSymTypes = nullptr) const; + bool processMotionClauses(lower::StatementContext &stmtCtx, + mlir::omp::MapClauseOps &result); bool processNontemporal(mlir::omp::NontemporalClauseOps &result) const; bool processReduction( mlir::Location currentLocation, mlir::omp::ReductionClauseOps &result, @@ -141,9 +143,6 @@ public: llvm::SmallVectorImpl<mlir::Location> &useDeviceLocs, llvm::SmallVectorImpl<const semantics::Symbol *> &useDeviceSyms) const; - template <typename T> - bool processMotionClauses(lower::StatementContext &stmtCtx, - mlir::omp::MapClauseOps &result); // Call this method for these clauses that should be supported but are not // implemented yet. It triggers a compilation error if any of the given // clauses is found. 
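The new non-template processMotionClauses above works because its callback is a generic lambda: decltype(clause) recovers the concrete clause type at each call, so the To/From map-type flag is still selected at compile time even though only one enclosing function remains (the deletion of the old template version follows below). The core trick in isolation, with hypothetical To/From tag types (the real code uses llvm::remove_cvref_t, a pre-C++20 equivalent of std::remove_cvref_t):

#include <cstdio>
#include <type_traits>

struct To {};
struct From {};

int main() {
  auto callback = [](const auto &clause) {
    // decltype(clause) is e.g. 'const To &'; strip cv/ref before comparing.
    constexpr bool isTo =
        std::is_same_v<std::remove_cvref_t<decltype(clause)>, To>;
    std::puts(isTo ? "OMP_MAP_TO" : "OMP_MAP_FROM");
  };
  callback(To{});   // prints OMP_MAP_TO
  callback(From{}); // prints OMP_MAP_FROM
}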
@@ -191,38 +190,6 @@ private: List<Clause> clauses; }; -template <typename T> -bool ClauseProcessor::processMotionClauses(lower::StatementContext &stmtCtx, - mlir::omp::MapClauseOps &result) { - std::map<const semantics::Symbol *, - llvm::SmallVector<OmpMapMemberIndicesData>> - parentMemberIndices; - llvm::SmallVector<const semantics::Symbol *> mapSymbols; - - bool clauseFound = findRepeatableClause<T>( - [&](const T &clause, const parser::CharBlock &source) { - mlir::Location clauseLocation = converter.genLocation(source); - - static_assert(std::is_same_v<T, omp::clause::To> || - std::is_same_v<T, omp::clause::From>); - - // TODO Support motion modifiers: present, mapper, iterator. - constexpr llvm::omp::OpenMPOffloadMappingFlags mapTypeBits = - std::is_same_v<T, omp::clause::To> - ? llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO - : llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM; - - processMapObjects(stmtCtx, clauseLocation, - std::get<ObjectList>(clause.t), mapTypeBits, - parentMemberIndices, result.mapVars, &mapSymbols); - }); - - insertChildMapInfoIntoParent(converter, parentMemberIndices, result.mapVars, - mapSymbols, - /*mapSymTypes=*/nullptr, /*mapSymLocs=*/nullptr); - return clauseFound; -} - template <typename... Ts> void ClauseProcessor::processTODO(mlir::Location currentLocation, llvm::omp::Directive directive) const { diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp index 99114dc..9602867 100644 --- a/flang/lib/Lower/OpenMP/OpenMP.cpp +++ b/flang/lib/Lower/OpenMP/OpenMP.cpp @@ -1223,12 +1223,10 @@ static void genTargetEnterExitUpdateDataClauses( cp.processDevice(stmtCtx, clauseOps); cp.processIf(directive, clauseOps); - if (directive == llvm::omp::Directive::OMPD_target_update) { - cp.processMotionClauses<clause::To>(stmtCtx, clauseOps); - cp.processMotionClauses<clause::From>(stmtCtx, clauseOps); - } else { + if (directive == llvm::omp::Directive::OMPD_target_update) + cp.processMotionClauses(stmtCtx, clauseOps); + else cp.processMap(loc, stmtCtx, clauseOps); - } cp.processNowait(clauseOps); } diff --git a/flang/lib/Optimizer/Builder/FIRBuilder.cpp b/flang/lib/Optimizer/Builder/FIRBuilder.cpp index d786d79..539235f 100644 --- a/flang/lib/Optimizer/Builder/FIRBuilder.cpp +++ b/flang/lib/Optimizer/Builder/FIRBuilder.cpp @@ -18,6 +18,7 @@ #include "flang/Optimizer/Dialect/FIRAttr.h" #include "flang/Optimizer/Dialect/FIROpsSupport.h" #include "flang/Optimizer/Dialect/FIRType.h" +#include "flang/Optimizer/Support/DataLayout.h" #include "flang/Optimizer/Support/FatalError.h" #include "flang/Optimizer/Support/InternalNames.h" #include "flang/Optimizer/Support/Utils.h" @@ -328,6 +329,17 @@ mlir::Value fir::FirOpBuilder::createHeapTemporary( name, dynamicLength, dynamicShape, attrs); } +mlir::Value fir::FirOpBuilder::genStackSave(mlir::Location loc) { + mlir::Type voidPtr = mlir::LLVM::LLVMPointerType::get( + getContext(), fir::factory::getAllocaAddressSpace(&getDataLayout())); + return create<mlir::LLVM::StackSaveOp>(loc, voidPtr); +} + +void fir::FirOpBuilder::genStackRestore(mlir::Location loc, + mlir::Value stackPointer) { + create<mlir::LLVM::StackRestoreOp>(loc, stackPointer); +} + /// Create a global variable in the (read-only) data section. A global variable /// must have a unique name to identify and reference it. 
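As a usage sketch of the two FirOpBuilder helpers added just above (editorial; assumes a `fir::FirOpBuilder builder` and an `mlir::Location loc` are in scope), every call site rewritten in this commit reduces to the same pattern:

    // Bracket a stack-allocating region with the new helpers. They emit the
    // llvm.intr.stacksave / llvm.intr.stackrestore ops that the updated
    // FileCheck tests later in this patch now expect.
    mlir::Value sp = builder.genStackSave(loc);
    // ... create fir.alloca-backed temporaries for the region ...
    builder.genStackRestore(loc, sp);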
fir::GlobalOp fir::FirOpBuilder::createGlobal( @@ -432,7 +444,9 @@ mlir::Value fir::FirOpBuilder::convertWithSemantics( // argument in characters and use it as the length of the string auto refType = getRefType(boxType.getEleTy()); mlir::Value charBase = createConvert(loc, refType, val); - mlir::Value unknownLen = create<fir::UndefOp>(loc, getIndexType()); + // Do not use fir.undef since llvm optimizer is too harsh when it + // sees such values (may just delete code). + mlir::Value unknownLen = createIntegerConstant(loc, getIndexType(), 0); fir::factory::CharacterExprHelper charHelper{*this, loc}; return charHelper.createEmboxChar(charBase, unknownLen); } @@ -791,6 +805,15 @@ void fir::FirOpBuilder::setFastMathFlags( setFastMathFlags(arithFMF); } +// Construction of an mlir::DataLayout is expensive so only do it on demand and +// memoise it in the builder instance +mlir::DataLayout &fir::FirOpBuilder::getDataLayout() { + if (dataLayout) + return *dataLayout; + dataLayout = std::make_unique<mlir::DataLayout>(getModule()); + return *dataLayout; +} + //===--------------------------------------------------------------------===// // ExtendedValue inquiry helper implementation //===--------------------------------------------------------------------===// @@ -1664,3 +1687,10 @@ void fir::factory::setInternalLinkage(mlir::func::FuncOp func) { mlir::LLVM::LinkageAttr::get(func->getContext(), internalLinkage); func->setAttr("llvm.linkage", linkage); } + +uint64_t fir::factory::getAllocaAddressSpace(mlir::DataLayout *dataLayout) { + if (dataLayout) + if (mlir::Attribute addrSpace = dataLayout->getAllocaMemorySpace()) + return mlir::cast<mlir::IntegerAttr>(addrSpace).getUInt(); + return 0; +} diff --git a/flang/lib/Optimizer/Builder/LowLevelIntrinsics.cpp b/flang/lib/Optimizer/Builder/LowLevelIntrinsics.cpp index bb5f77d..411a486 100644 --- a/flang/lib/Optimizer/Builder/LowLevelIntrinsics.cpp +++ b/flang/lib/Optimizer/Builder/LowLevelIntrinsics.cpp @@ -76,25 +76,6 @@ fir::factory::getLlvmSetRounding(fir::FirOpBuilder &builder) { funcTy); } -mlir::func::FuncOp fir::factory::getLlvmStackSave(fir::FirOpBuilder &builder) { - // FIXME: This should query the target alloca address space - auto ptrTy = builder.getRefType(builder.getIntegerType(8)); - auto funcTy = - mlir::FunctionType::get(builder.getContext(), std::nullopt, {ptrTy}); - return builder.createFunction(builder.getUnknownLoc(), "llvm.stacksave.p0", - funcTy); -} - -mlir::func::FuncOp -fir::factory::getLlvmStackRestore(fir::FirOpBuilder &builder) { - // FIXME: This should query the target alloca address space - auto ptrTy = builder.getRefType(builder.getIntegerType(8)); - auto funcTy = - mlir::FunctionType::get(builder.getContext(), {ptrTy}, std::nullopt); - return builder.createFunction(builder.getUnknownLoc(), "llvm.stackrestore.p0", - funcTy); -} - mlir::func::FuncOp fir::factory::getLlvmInitTrampoline(fir::FirOpBuilder &builder) { auto ptrTy = builder.getRefType(builder.getIntegerType(8)); diff --git a/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp b/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp index a2a9cff..f6cb26f 100644 --- a/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp +++ b/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp @@ -1236,25 +1236,18 @@ private: inline void clearMembers() { setMembers(nullptr, nullptr, nullptr); } - uint64_t getAllocaAddressSpace() const { - if (dataLayout) - if (mlir::Attribute addrSpace = dataLayout->getAllocaMemorySpace()) - return llvm::cast<mlir::IntegerAttr>(addrSpace).getUInt(); - return 0; - } - // Inserts 
a call to llvm.stacksave at the current insertion // point and the given location. Returns the call's result Value. inline mlir::Value genStackSave(mlir::Location loc) { - mlir::Type voidPtr = mlir::LLVM::LLVMPointerType::get( - rewriter->getContext(), getAllocaAddressSpace()); - return rewriter->create<mlir::LLVM::StackSaveOp>(loc, voidPtr); + fir::FirOpBuilder builder(*rewriter, getModule()); + return builder.genStackSave(loc); } // Inserts a call to llvm.stackrestore at the current insertion // point and the given location and argument. inline void genStackRestore(mlir::Location loc, mlir::Value sp) { - rewriter->create<mlir::LLVM::StackRestoreOp>(loc, sp); + fir::FirOpBuilder builder(*rewriter, getModule()); + return builder.genStackRestore(loc, sp); } fir::CodeGenSpecifics *specifics = nullptr; diff --git a/flang/lib/Optimizer/Dialect/FIRType.cpp b/flang/lib/Optimizer/Dialect/FIRType.cpp index c1debf2..05f6446 100644 --- a/flang/lib/Optimizer/Dialect/FIRType.cpp +++ b/flang/lib/Optimizer/Dialect/FIRType.cpp @@ -1467,4 +1467,4 @@ fir::getTypeSizeAndAlignmentOrCrash(mlir::Location loc, mlir::Type ty, if (result) return *result; TODO(loc, "computing size of a component"); -}
\ No newline at end of file +} diff --git a/flang/lib/Optimizer/Transforms/StackArrays.cpp b/flang/lib/Optimizer/Transforms/StackArrays.cpp index a8f1a74..1b92992 100644 --- a/flang/lib/Optimizer/Transforms/StackArrays.cpp +++ b/flang/lib/Optimizer/Transforms/StackArrays.cpp @@ -734,28 +734,12 @@ void AllocMemConversion::insertStackSaveRestore( auto mod = oldAlloc->getParentOfType<mlir::ModuleOp>(); fir::FirOpBuilder builder{rewriter, mod}; - mlir::func::FuncOp stackSaveFn = fir::factory::getLlvmStackSave(builder); - mlir::SymbolRefAttr stackSaveSym = - builder.getSymbolRefAttr(stackSaveFn.getName()); - builder.setInsertionPoint(oldAlloc); - mlir::Value sp = - builder - .create<fir::CallOp>(oldAlloc.getLoc(), stackSaveSym, - stackSaveFn.getFunctionType().getResults(), - mlir::ValueRange{}) - .getResult(0); - - mlir::func::FuncOp stackRestoreFn = - fir::factory::getLlvmStackRestore(builder); - mlir::SymbolRefAttr stackRestoreSym = - builder.getSymbolRefAttr(stackRestoreFn.getName()); + mlir::Value sp = builder.genStackSave(oldAlloc.getLoc()); auto createStackRestoreCall = [&](mlir::Operation *user) { builder.setInsertionPoint(user); - builder.create<fir::CallOp>(user->getLoc(), stackRestoreSym, - stackRestoreFn.getFunctionType().getResults(), - mlir::ValueRange{sp}); + builder.genStackRestore(user->getLoc(), sp); }; for (mlir::Operation *user : oldAlloc->getUsers()) { diff --git a/flang/lib/Optimizer/Transforms/StackReclaim.cpp b/flang/lib/Optimizer/Transforms/StackReclaim.cpp index 8a60a9e..bd3e49a 100644 --- a/flang/lib/Optimizer/Transforms/StackReclaim.cpp +++ b/flang/lib/Optimizer/Transforms/StackReclaim.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "flang/Common/Fortran.h" +#include "flang/Optimizer/Builder/FIRBuilder.h" #include "flang/Optimizer/Dialect/FIRDialect.h" #include "flang/Optimizer/Dialect/FIROps.h" #include "flang/Optimizer/Transforms/Passes.h" @@ -31,34 +32,20 @@ public: }; } // namespace -uint64_t getAllocaAddressSpace(Operation *op) { - mlir::ModuleOp module = mlir::dyn_cast_or_null<mlir::ModuleOp>(op); - if (!module) - module = op->getParentOfType<mlir::ModuleOp>(); - - if (mlir::Attribute addrSpace = - mlir::DataLayout(module).getAllocaMemorySpace()) - return llvm::cast<mlir::IntegerAttr>(addrSpace).getUInt(); - return 0; -} - void StackReclaimPass::runOnOperation() { auto *op = getOperation(); - auto *context = &getContext(); - mlir::OpBuilder builder(context); - mlir::Type voidPtr = - mlir::LLVM::LLVMPointerType::get(context, getAllocaAddressSpace(op)); + fir::FirOpBuilder builder(op, fir::getKindMapping(op)); op->walk([&](fir::DoLoopOp loopOp) { mlir::Location loc = loopOp.getLoc(); if (!loopOp.getRegion().getOps<fir::AllocaOp>().empty()) { builder.setInsertionPointToStart(&loopOp.getRegion().front()); - auto stackSaveOp = builder.create<LLVM::StackSaveOp>(loc, voidPtr); + mlir::Value sp = builder.genStackSave(loc); auto *terminator = loopOp.getRegion().back().getTerminator(); builder.setInsertionPoint(terminator); - builder.create<LLVM::StackRestoreOp>(loc, stackSaveOp); + builder.genStackRestore(loc, sp); } }); } diff --git a/flang/runtime/CMakeLists.txt b/flang/runtime/CMakeLists.txt index 4537b2d..0ad1b718 100644 --- a/flang/runtime/CMakeLists.txt +++ b/flang/runtime/CMakeLists.txt @@ -251,6 +251,13 @@ if (NOT TARGET FortranFloat128Math) APPEND PROPERTY COMPILE_DEFINITIONS ${f128_defs} ) + get_target_property(f128_include_dirs + FortranFloat128MathILib INTERFACE_INCLUDE_DIRECTORIES + ) + 
set_property(SOURCE ${f128_sources} + APPEND PROPERTY INCLUDE_DIRECTORIES + ${f128_include_dirs} + ) list(APPEND sources ${f128_sources}) endif() endif() diff --git a/flang/runtime/CUDA/CMakeLists.txt b/flang/runtime/CUDA/CMakeLists.txt index 81055b2..490bb36 100644 --- a/flang/runtime/CUDA/CMakeLists.txt +++ b/flang/runtime/CUDA/CMakeLists.txt @@ -16,6 +16,7 @@ set(CUFRT_LIBNAME CufRuntime_cuda_${CUDAToolkit_VERSION_MAJOR}) add_flang_library(${CUFRT_LIBNAME} allocator.cpp descriptor.cpp + memory.cpp ) if (BUILD_SHARED_LIBS) diff --git a/flang/runtime/CUDA/memory.cpp b/flang/runtime/CUDA/memory.cpp new file mode 100644 index 0000000..a287fa1 --- /dev/null +++ b/flang/runtime/CUDA/memory.cpp @@ -0,0 +1,46 @@ +//===-- runtime/CUDA/memory.cpp -------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "flang/Runtime/CUDA/memory.h" +#include "../terminator.h" + +#include "cuda_runtime.h" + +namespace Fortran::runtime::cuda { +extern "C" { + +void RTDEF(CUFMemsetDescriptor)(const Descriptor &desc, void *value, + const char *sourceFile, int sourceLine) { + Terminator terminator{sourceFile, sourceLine}; + terminator.Crash("not yet implemented: CUDA data transfer from a scalar " + "value to a descriptor"); +} + +void RTDEF(CUFDataTransferDescPtr)(const Descriptor &desc, void *addr, + std::size_t bytes, unsigned mode, const char *sourceFile, int sourceLine) { + Terminator terminator{sourceFile, sourceLine}; + terminator.Crash( + "not yet implemented: CUDA data transfer from a pointer to a descriptor"); +} + +void RTDEF(CUFDataTransferPtrDesc)(void *addr, const Descriptor &desc, + std::size_t bytes, unsigned mode, const char *sourceFile, int sourceLine) { + Terminator terminator{sourceFile, sourceLine}; + terminator.Crash( + "not yet implemented: CUDA data transfer from a descriptor to a pointer"); +} + +void RTDECL(CUFDataTransferDescDesc)(const Descriptor &dstDesc, + const Descriptor &srcDesc, unsigned mode, const char *sourceFile, + int sourceLine) { + Terminator terminator{sourceFile, sourceLine}; + terminator.Crash( + "not yet implemented: CUDA data transfer between two descriptors"); +} +} +} // namespace Fortran::runtime::cuda diff --git a/flang/runtime/Float128Math/CMakeLists.txt b/flang/runtime/Float128Math/CMakeLists.txt index a5f5bec..87f791f 100644 --- a/flang/runtime/Float128Math/CMakeLists.txt +++ b/flang/runtime/Float128Math/CMakeLists.txt @@ -69,6 +69,9 @@ set(sources include_directories(AFTER "${CMAKE_CURRENT_SOURCE_DIR}/..") add_library(FortranFloat128MathILib INTERFACE) +target_include_directories(FortranFloat128MathILib INTERFACE + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..> + ) if (FLANG_RUNTIME_F128_MATH_LIB) if (${FLANG_RUNTIME_F128_MATH_LIB} STREQUAL "libquadmath") diff --git a/flang/test/HLFIR/order_assignments/where-scheduling.f90 b/flang/test/HLFIR/order_assignments/where-scheduling.f90 index d3665d2..ab87ae9 100644 --- a/flang/test/HLFIR/order_assignments/where-scheduling.f90 +++ b/flang/test/HLFIR/order_assignments/where-scheduling.f90 @@ -134,7 +134,7 @@ end subroutine !CHECK-NEXT: run 1 save : where/mask !CHECK-NEXT: run 2 evaluate: where/region_assign1 !CHECK-LABEL: ------------ scheduling where in _QPonly_once ------------ -!CHECK-NEXT: unknown effect: %{{[0-9]+}} = 
fir.call @llvm.stacksave.p0() fastmath<contract> : () -> !fir.ref<i8> +!CHECK-NEXT: unknown effect: %{{[0-9]+}} = llvm.intr.stacksave : !llvm.ptr !CHECK-NEXT: run 1 save (w): where/mask !CHECK-NEXT: run 2 evaluate: where/region_assign1 !CHECK-NEXT: run 3 evaluate: where/region_assign2 diff --git a/flang/test/Lower/HLFIR/block_bindc_pocs.f90 b/flang/test/Lower/HLFIR/block_bindc_pocs.f90 index 090eeb3..ed07d88 100644 --- a/flang/test/Lower/HLFIR/block_bindc_pocs.f90 +++ b/flang/test/Lower/HLFIR/block_bindc_pocs.f90 @@ -8,9 +8,9 @@ module m end subroutine test_proc end interface end module m -!CHECK-DAG: %[[S0:.*]] = fir.call @llvm.stacksave.p0() fastmath<contract> : () -> !fir.ref<i8> +!CHECK-DAG: %[[S0:.*]] = llvm.intr.stacksave : !llvm.ptr !CHECK-DAG: fir.call @test_proc() proc_attrs<bind_c> fastmath<contract> : () -> () -!CHECK-DAG: fir.call @llvm.stackrestore.p0(%[[S0]]) fastmath<contract> : (!fir.ref<i8>) -> () +!CHECK-DAG: llvm.intr.stackrestore %[[S0]] : !llvm.ptr !CHECK-DAG: func.func private @test_proc() attributes {fir.bindc_name = "test_proc"} subroutine test BLOCK diff --git a/flang/test/Lower/HLFIR/elemental-array-ops.f90 b/flang/test/Lower/HLFIR/elemental-array-ops.f90 index 80801fd..9929c17 100644 --- a/flang/test/Lower/HLFIR/elemental-array-ops.f90 +++ b/flang/test/Lower/HLFIR/elemental-array-ops.f90 @@ -182,12 +182,12 @@ end subroutine char_return ! CHECK: %[[VAL_23:.*]] = arith.constant 0 : index ! CHECK: %[[VAL_24:.*]] = arith.cmpi sgt, %[[VAL_22]], %[[VAL_23]] : index ! CHECK: %[[VAL_25:.*]] = arith.select %[[VAL_24]], %[[VAL_22]], %[[VAL_23]] : index -! CHECK: %[[VAL_26:.*]] = fir.call @llvm.stacksave.p0() fastmath<contract> : () -> !fir.ref<i8> +! CHECK: %[[VAL_26:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[VAL_27:.*]] = fir.call @_QPcallee(%[[VAL_2]], %[[VAL_25]], %[[VAL_20]]) fastmath<contract> : (!fir.ref<!fir.char<1,3>>, index, !fir.boxchar<1>) -> !fir.boxchar<1> ! CHECK: %[[VAL_28:.*]]:2 = hlfir.declare %[[VAL_2]] typeparams %[[VAL_25]] {uniq_name = ".tmp.func_result"} : (!fir.ref<!fir.char<1,3>>, index) -> (!fir.ref<!fir.char<1,3>>, !fir.ref<!fir.char<1,3>>) ! CHECK: %[[MustFree:.*]] = arith.constant false ! CHECK: %[[ResultTemp:.*]] = hlfir.as_expr %[[VAL_28]]#0 move %[[MustFree]] : (!fir.ref<!fir.char<1,3>>, i1) -> !hlfir.expr<!fir.char<1,3>> -! CHECK: fir.call @llvm.stackrestore.p0(%[[VAL_26]]) fastmath<contract> : (!fir.ref<i8>) -> () +! CHECK: llvm.intr.stackrestore %[[VAL_26]] : !llvm.ptr ! CHECK: hlfir.yield_element %[[ResultTemp]] : !hlfir.expr<!fir.char<1,3>> ! CHECK: } ! CHECK: %[[VAL_29:.*]] = arith.constant 0 : index diff --git a/flang/test/Lower/HLFIR/ignore-type-f77-character.f90 b/flang/test/Lower/HLFIR/ignore-type-f77-character.f90 new file mode 100644 index 0000000..41dbf82 --- /dev/null +++ b/flang/test/Lower/HLFIR/ignore-type-f77-character.f90 @@ -0,0 +1,35 @@ +! Test ignore_tkr(tk) with character dummies +! 
RUN: %flang_fc1 -emit-fir %s -o - | FileCheck %s + +module test_char_tk + interface + subroutine foo(c) + character(1)::c(*) + !dir$ ignore_tkr(tkrdm) c + end subroutine + end interface +contains + subroutine test_normal() + character(1) :: c(10) + call foo(c) + end subroutine +!CHECK-LABEL: func.func @_QMtest_char_tkPtest_normal( +!CHECK: %[[VAL_6:.*]] = fir.emboxchar %{{.*}}, %c1{{.*}}: (!fir.ref<!fir.char<1>>, index) -> !fir.boxchar<1> +!CHECK: fir.call @_QPfoo(%[[VAL_6]]) fastmath<contract> : (!fir.boxchar<1>) -> () + subroutine test_normal2() + character(10) :: c(10) + call foo(c) + end subroutine +!CHECK-LABEL: func.func @_QMtest_char_tkPtest_normal2( +!CHECK: %[[VAL_4:.*]] = fir.convert %{{.*}} : (!fir.ref<!fir.array<10x!fir.char<1,10>>>) -> !fir.ref<!fir.char<1,10>> +!CHECK: %[[VAL_5:.*]] = fir.emboxchar %[[VAL_4]], %c10{{.*}}: (!fir.ref<!fir.char<1,10>>, index) -> !fir.boxchar<1> +!CHECK: fir.call @_QPfoo(%[[VAL_5]]) fastmath<contract> : (!fir.boxchar<1>) -> () + subroutine test_weird() + real :: c(10) + call foo(c) + end subroutine +!CHECK-LABEL: func.func @_QMtest_char_tkPtest_weird( +!CHECK: %[[VAL_5:.*]] = fir.convert %{{.*}} : (!fir.ref<!fir.array<10xf32>>) -> !fir.ref<!fir.char<1,?>> +!CHECK: %[[VAL_6:.*]] = fir.emboxchar %[[VAL_5]], %c0{{.*}}: (!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1> +!CHECK: fir.call @_QPfoo(%[[VAL_6]]) fastmath<contract> : (!fir.boxchar<1>) -> () +end module diff --git a/flang/test/Lower/HLFIR/proc-pointer-comp-pass.f90 b/flang/test/Lower/HLFIR/proc-pointer-comp-pass.f90 index 247008e..c1a827f 100644 --- a/flang/test/Lower/HLFIR/proc-pointer-comp-pass.f90 +++ b/flang/test/Lower/HLFIR/proc-pointer-comp-pass.f90 @@ -105,6 +105,6 @@ end subroutine ! CHECK: %[[VAL_7:.*]] = arith.constant 0 : index ! CHECK: %[[VAL_8:.*]] = arith.cmpi sgt, %[[VAL_6]], %[[VAL_7]] : index ! CHECK: %[[VAL_9:.*]] = arith.select %[[VAL_8]], %[[VAL_6]], %[[VAL_7]] : index -! CHECK: %[[VAL_10:.*]] = fir.call @llvm.stacksave.p0() fastmath<contract> : () -> !fir.ref<i8> +! CHECK: %[[VAL_10:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[VAL_11:.*]] = fir.box_addr %[[VAL_4]] : (!fir.boxproc<(!fir.ref<!fir.char<1,4>>, index, !fir.ref<!fir.type<_QMmTt3{c:!fir.char<1,4>,p:!fir.boxproc<(!fir.ref<!fir.char<1,4>>, index, !fir.ref<!fir.type<_QMmTt3>>) -> !fir.boxchar<1>>}>>) -> !fir.boxchar<1>>) -> ((!fir.ref<!fir.char<1,4>>, index, !fir.ref<!fir.type<_QMmTt3{c:!fir.char<1,4>,p:!fir.boxproc<(!fir.ref<!fir.char<1,4>>, index, !fir.ref<!fir.type<_QMmTt3>>) -> !fir.boxchar<1>>}>>) -> !fir.boxchar<1>) ! CHECK: %[[VAL_12:.*]] = fir.call %[[VAL_11]](%[[VAL_1]], %[[VAL_9]], %[[VAL_2]]#1) fastmath<contract> : (!fir.ref<!fir.char<1,4>>, index, !fir.ref<!fir.type<_QMmTt3{c:!fir.char<1,4>,p:!fir.boxproc<(!fir.ref<!fir.char<1,4>>, index, !fir.ref<!fir.type<_QMmTt3>>) -> !fir.boxchar<1>>}>>) -> !fir.boxchar<1> diff --git a/flang/test/Lower/HLFIR/where-nonelemental.f90 b/flang/test/Lower/HLFIR/where-nonelemental.f90 index f0a6857..15a281b 100644 --- a/flang/test/Lower/HLFIR/where-nonelemental.f90 +++ b/flang/test/Lower/HLFIR/where-nonelemental.f90 @@ -26,10 +26,10 @@ end subroutine ! CHECK-LABEL: func.func @_QPtest_where( ! CHECK: hlfir.where { ! CHECK-NOT: hlfir.exactly_once -! CHECK: %[[VAL_17:.*]] = fir.call @llvm.stacksave.p0() fastmath<contract> : () -> !fir.ref<i8> +! CHECK: %[[VAL_17:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[VAL_19:.*]] = fir.call @_QPlogical_func1() fastmath<contract> : () -> !fir.array<100x!fir.logical<4>> ! 
CHECK: hlfir.yield %{{.*}} : !hlfir.expr<100x!fir.logical<4>> cleanup { -! CHECK: fir.call @llvm.stackrestore.p0(%[[VAL_17]]) fastmath<contract> : (!fir.ref<i8>) -> () +! CHECK: llvm.intr.stackrestore %[[VAL_17]] : !llvm.ptr ! CHECK: } ! CHECK: } do { ! CHECK: hlfir.region_assign { @@ -70,10 +70,10 @@ end subroutine ! CHECK: } ! CHECK: hlfir.elsewhere mask { ! CHECK: %[[VAL_62:.*]] = hlfir.exactly_once : !hlfir.expr<100x!fir.logical<4>> { -! CHECK: %[[VAL_72:.*]] = fir.call @llvm.stacksave.p0() fastmath<contract> : () -> !fir.ref<i8> +! CHECK: %[[VAL_72:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: fir.call @_QPlogical_func2() fastmath<contract> : () -> !fir.array<100x!fir.logical<4>> ! CHECK: hlfir.yield %{{.*}} : !hlfir.expr<100x!fir.logical<4>> cleanup { -! CHECK: fir.call @llvm.stackrestore.p0(%[[VAL_72]]) fastmath<contract> : (!fir.ref<i8>) -> () +! CHECK: llvm.intr.stackrestore %[[VAL_72]] : !llvm.ptr ! CHECK: } ! CHECK: } ! CHECK: hlfir.yield %[[VAL_62]] : !hlfir.expr<100x!fir.logical<4>> @@ -123,11 +123,11 @@ end subroutine ! CHECK: } (%[[VAL_10:.*]]: i32) { ! CHECK: %[[VAL_11:.*]] = hlfir.forall_index "i" %[[VAL_10]] : (i32) -> !fir.ref<i32> ! CHECK: hlfir.where { -! CHECK: %[[VAL_21:.*]] = fir.call @llvm.stacksave.p0() fastmath<contract> : () -> !fir.ref<i8> +! CHECK: %[[VAL_21:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK-NOT: hlfir.exactly_once ! CHECK: %[[VAL_23:.*]] = fir.call @_QPpure_logical_func1() fastmath<contract> : () -> !fir.array<100x!fir.logical<4>> ! CHECK: hlfir.yield %{{.*}} : !hlfir.expr<100x!fir.logical<4>> cleanup { -! CHECK: fir.call @llvm.stackrestore.p0(%[[VAL_21]]) fastmath<contract> : (!fir.ref<i8>) -> () +! CHECK: llvm.intr.stackrestore %[[VAL_21]] : !llvm.ptr ! CHECK: } ! CHECK: } do { ! CHECK: hlfir.region_assign { @@ -172,10 +172,10 @@ end subroutine ! CHECK: } ! CHECK: hlfir.elsewhere mask { ! CHECK: %[[VAL_129:.*]] = hlfir.exactly_once : !hlfir.expr<100x!fir.logical<4>> { -! CHECK: %[[VAL_139:.*]] = fir.call @llvm.stacksave.p0() fastmath<contract> : () -> !fir.ref<i8> +! CHECK: %[[VAL_139:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[VAL_141:.*]] = fir.call @_QPpure_logical_func2() fastmath<contract> : () -> !fir.array<100x!fir.logical<4>> ! CHECK: hlfir.yield %{{.*}} : !hlfir.expr<100x!fir.logical<4>> cleanup { -! CHECK: fir.call @llvm.stackrestore.p0(%[[VAL_139]]) fastmath<contract> : (!fir.ref<i8>) -> () +! CHECK: llvm.intr.stackrestore %[[VAL_139]] : !llvm.ptr ! CHECK: } ! CHECK: } ! CHECK: hlfir.yield %[[VAL_129]] : !hlfir.expr<100x!fir.logical<4>> diff --git a/flang/test/Lower/OpenMP/distribute-parallel-do.f90 b/flang/test/Lower/OpenMP/distribute-parallel-do.f90 index 91940ac..48567a1 100644 --- a/flang/test/Lower/OpenMP/distribute-parallel-do.f90 +++ b/flang/test/Lower/OpenMP/distribute-parallel-do.f90 @@ -36,21 +36,21 @@ subroutine distribute_parallel_do_dist_schedule() !$omp end teams end subroutine distribute_parallel_do_dist_schedule -! CHECK-LABEL: func.func @_QPdistribute_parallel_do_ordered( -subroutine distribute_parallel_do_ordered() +! CHECK-LABEL: func.func @_QPdistribute_parallel_do_schedule( +subroutine distribute_parallel_do_schedule() !$omp teams ! CHECK: omp.parallel private({{.*}}) { ! CHECK: omp.distribute { - ! CHECK-NEXT: omp.wsloop ordered(1) { + ! CHECK-NEXT: omp.wsloop schedule(runtime) { ! 
CHECK-NEXT: omp.loop_nest - !$omp distribute parallel do ordered(1) + !$omp distribute parallel do schedule(runtime) do index_ = 1, 10 end do !$omp end distribute parallel do !$omp end teams -end subroutine distribute_parallel_do_ordered +end subroutine distribute_parallel_do_schedule ! CHECK-LABEL: func.func @_QPdistribute_parallel_do_private( subroutine distribute_parallel_do_private() diff --git a/flang/test/Lower/array-elemental-calls-char.f90 b/flang/test/Lower/array-elemental-calls-char.f90 index e2507f0..652e792 100644 --- a/flang/test/Lower/array-elemental-calls-char.f90 +++ b/flang/test/Lower/array-elemental-calls-char.f90 @@ -227,7 +227,7 @@ subroutine foo6(c) ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_15]] : (i32) -> index ! CHECK: %[[CMPI:.*]] = arith.cmpi sgt, %[[VAL_16]], %{{.*}} : index ! CHECK: %[[SELECT:.*]] = arith.select %[[CMPI]], %[[VAL_16]], %{{.*}} : index - ! CHECK: %[[VAL_17:.*]] = fir.call @llvm.stacksave.p0() {{.*}}: () -> !fir.ref<i8> + ! CHECK: %[[VAL_17:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[VAL_18:.*]] = fir.alloca !fir.char<1,?>(%[[SELECT]] : index) {bindc_name = ".result"} ! CHECK: %[[VAL_19:.*]] = fir.call @_QMchar_elemPelem_return_char(%[[VAL_18]], %[[SELECT]], %[[VAL_14]]) {{.*}}: (!fir.ref<!fir.char<1,?>>, index, !fir.boxchar<1>) -> !fir.boxchar<1> ! CHECK: %[[VAL_20:.*]] = arith.cmpi slt, %[[VAL_6]]#1, %[[SELECT]] : index @@ -253,7 +253,7 @@ subroutine foo6(c) ! CHECK: %[[VAL_36:.*]] = arith.subi %[[VAL_31]], %[[VAL_2]] : index ! CHECK: br ^bb3(%[[VAL_35]], %[[VAL_36]] : index, index) ! CHECK: ^bb5: - ! CHECK: fir.call @llvm.stackrestore.p0(%[[VAL_17]]) {{.*}}: (!fir.ref<i8>) -> () + ! CHECK: llvm.intr.stackrestore %[[VAL_17]] : !llvm.ptr ! CHECK: %[[VAL_37:.*]] = arith.subi %[[VAL_10]], %[[VAL_2]] : index ! CHECK: br ^bb1(%[[VAL_12]], %[[VAL_37]] : index, index) ! CHECK: ^bb6: diff --git a/flang/test/Lower/block.f90 b/flang/test/Lower/block.f90 index 70ff67d..d2bc90e 100644 --- a/flang/test/Lower/block.f90 +++ b/flang/test/Lower/block.f90 @@ -7,13 +7,13 @@ program bb ! block stack management and exits integer :: i, j ! CHECK: fir.store %c0{{.*}} to %[[V_1]] : !fir.ref<i32> i = 0 - ! CHECK: %[[V_3:[0-9]+]] = fir.call @llvm.stacksave.p0() + ! CHECK: %[[V_3:[0-9]+]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: fir.store %{{.*}} to %[[V_1]] : !fir.ref<i32> ! CHECK: br ^bb1 ! CHECK: ^bb1: // 2 preds: ^bb0, ^bb16 ! CHECK: cond_br %{{.*}}, ^bb2, ^bb17 ! CHECK: ^bb2: // pred: ^bb1 - ! CHECK: %[[V_11:[0-9]+]] = fir.call @llvm.stacksave.p0() + ! CHECK: %[[V_11:[0-9]+]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: fir.store %{{.*}} to %[[V_1]] : !fir.ref<i32> ! CHECK: cond_br %{{.*}}, ^bb3, ^bb4 ! CHECK: ^bb3: // pred: ^bb2 @@ -27,29 +27,29 @@ program bb ! block stack management and exits ! CHECK: fir.store %{{.*}} to %[[V_1]] : !fir.ref<i32> ! CHECK: cond_br %{{.*}}, ^bb7, ^bb8 ! CHECK: ^bb7: // pred: ^bb6 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_11]]) + ! CHECK: llvm.intr.stackrestore %[[V_11]] : !llvm.ptr ! CHECK: br ^bb15 ! CHECK: ^bb8: // pred: ^bb6 ! CHECK: fir.store %{{.*}} to %[[V_1]] : !fir.ref<i32> ! CHECK: cond_br %{{.*}}, ^bb9, ^bb10 ! CHECK: ^bb9: // pred: ^bb8 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_11]]) + ! CHECK: llvm.intr.stackrestore %[[V_11]] : !llvm.ptr ! CHECK: br ^bb16 ! CHECK: ^bb10: // 2 preds: ^bb3, ^bb8 ! CHECK: fir.store %{{.*}} to %[[V_1]] : !fir.ref<i32> ! CHECK: cond_br %{{.*}}, ^bb11, ^bb12 ! CHECK: ^bb11: // pred: ^bb10 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_11]]) + ! 
CHECK: llvm.intr.stackrestore %[[V_11]] : !llvm.ptr ! CHECK: br ^bb18 ! CHECK: ^bb12: // pred: ^bb10 ! CHECK: fir.store %{{.*}} to %[[V_1]] : !fir.ref<i32> ! CHECK: cond_br %{{.*}}, ^bb13, ^bb14 ! CHECK: ^bb13: // pred: ^bb12 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_11]]) - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_3]]) + ! CHECK: llvm.intr.stackrestore %[[V_11]] : !llvm.ptr + ! CHECK: llvm.intr.stackrestore %[[V_3]] : !llvm.ptr ! CHECK: br ^bb19 ! CHECK: ^bb14: // 2 preds: ^bb5, ^bb12 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_11]]) + ! CHECK: llvm.intr.stackrestore %[[V_11]] : !llvm.ptr ! CHECK: br ^bb15 ! CHECK: ^bb15: // 2 preds: ^bb7, ^bb14 ! CHECK: br ^bb16 @@ -59,7 +59,7 @@ program bb ! block stack management and exits ! CHECK: fir.store %{{.*}} to %[[V_1]] : !fir.ref<i32> ! CHECK: cf.br ^bb18 ! CHECK: ^bb18: // 2 preds: ^bb11, ^bb17 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_3]]) + ! CHECK: llvm.intr.stackrestore %[[V_3]] : !llvm.ptr ! CHECK: br ^bb19 ! CHECK: ^bb19: // 2 preds: ^bb13, ^bb18 block @@ -79,10 +79,10 @@ program bb ! block stack management and exits 12 end block 100 print*, i ! expect 21 - ! CHECK: %[[V_51:[0-9]+]] = fir.call @llvm.stacksave.p0() fastmath<contract> : () -> !fir.ref<i8> + ! CHECK: %[[V_51:[0-9]+]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: fir.store %c5{{.*}} to %[[V_0]] : !fir.ref<i32> ! CHECK: fir.call @ss(%[[V_0]]) proc_attrs<bind_c> fastmath<contract> : (!fir.ref<i32>) -> () - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_51]]) fastmath<contract> : (!fir.ref<i8>) -> () + ! CHECK: llvm.intr.stackrestore %[[V_51]] : !llvm.ptr block interface subroutine ss(n) bind(c) diff --git a/flang/test/Lower/call-suspect.f90 b/flang/test/Lower/call-suspect.f90 index 6a3bca8..4ac58bf 100644 --- a/flang/test/Lower/call-suspect.f90 +++ b/flang/test/Lower/call-suspect.f90 @@ -2,13 +2,10 @@ ! are accepted regardless to maintain backwards compatibility with ! other Fortran implementations. -! RUN: bbc -emit-fir -hlfir=false %s -o - | FileCheck %s +! RUN: bbc -emit-fir %s -o - | FileCheck %s ! CHECK-LABEL: func @_QPs1() { -! CHECK: %[[cast:.*]] = fir.convert %{{.*}} : (!fir.ref<f32>) -> !fir.ref<!fir.char<1,?>> -! CHECK: %[[undef:.*]] = fir.undefined index -! CHECK: %[[box:.*]] = fir.emboxchar %[[cast]], %[[undef]] : (!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1> -! CHECK: fir.call @_QPs3(%[[box]]) {{.*}}: (!fir.boxchar<1>) -> () +! CHECK: fir.convert %{{.*}} : ((!fir.boxchar<1>) -> ()) -> ((!fir.ref<f32>) -> ()) ! Pass a REAL by reference to a subroutine expecting a CHARACTER subroutine s1 @@ -16,11 +13,7 @@ subroutine s1 end subroutine s1 ! CHECK-LABEL: func @_QPs2( -! CHECK: %[[ptr:.*]] = fir.box_addr %{{.*}} : (!fir.box<!fir.ptr<f32>>) -> !fir.ptr<f32> -! CHECK: %[[cast:.*]] = fir.convert %[[ptr]] : (!fir.ptr<f32>) -> !fir.ref<!fir.char<1,?>> -! CHECK: %[[undef:.*]] = fir.undefined index -! CHECK: %[[box:.*]] = fir.emboxchar %[[cast]], %[[undef]] : (!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1> -! CHECK: fir.call @_QPs3(%[[box]]) {{.*}}: (!fir.boxchar<1>) -> () +! CHECK: fir.convert %{{.*}} : ((!fir.boxchar<1>) -> ()) -> ((!fir.ref<f32>) -> ()) ! Pass a REAL, POINTER data reference to a subroutine expecting a CHARACTER subroutine s2(p) diff --git a/flang/test/Lower/computed-goto.f90 b/flang/test/Lower/computed-goto.f90 index adf76c3..bb24411 100644 --- a/flang/test/Lower/computed-goto.f90 +++ b/flang/test/Lower/computed-goto.f90 @@ -38,15 +38,15 @@ end ! CHECK-LABEL: func @_QPm1 function m1(index) ! 
CHECK: %[[V_0:[0-9]+]] = fir.alloca i32 {bindc_name = "m1" - ! CHECK: %[[V_1:[0-9]+]] = fir.call @llvm.stacksave.p0() + ! CHECK: %[[V_1:[0-9]+]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[V_2:[0-9]+]] = fir.load %arg0 : !fir.ref<i32> ! CHECK: %[[V_3:[0-9]+]] = arith.cmpi eq, %[[V_2]], %c1{{.*}} : i32 ! CHECK: cf.cond_br %[[V_3]], ^bb1, ^bb2 ! CHECK: ^bb1: // pred: ^bb0 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_1]]) + ! CHECK: llvm.intr.stackrestore %[[V_1]] : !llvm.ptr ! CHECK: cf.br ^bb3 ! CHECK: ^bb2: // pred: ^bb0 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_1]]) + ! CHECK: llvm.intr.stackrestore %[[V_1]] : !llvm.ptr ! CHECK: fir.store %c0{{.*}} to %[[V_0]] : !fir.ref<i32> ! CHECK: cf.br ^bb4 ! CHECK: ^bb3: // pred: ^bb1 @@ -65,21 +65,21 @@ end ! CHECK-LABEL: func @_QPm2 function m2(index) ! CHECK: %[[V_0:[0-9]+]] = fir.alloca i32 {bindc_name = "m2" - ! CHECK: %[[V_1:[0-9]+]] = fir.call @llvm.stacksave.p0() + ! CHECK: %[[V_1:[0-9]+]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[V_2:[0-9]+]] = fir.load %arg0 : !fir.ref<i32> ! CHECK: %[[V_3:[0-9]+]] = arith.cmpi eq, %[[V_2]], %c1{{.*}} : i32 ! CHECK: cf.cond_br %[[V_3]], ^bb1, ^bb2 ! CHECK: ^bb1: // pred: ^bb0 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_1]]) + ! CHECK: llvm.intr.stackrestore %[[V_1]] : !llvm.ptr ! CHECK: cf.br ^bb5 ! CHECK: ^bb2: // pred: ^bb0 ! CHECK: %[[V_4:[0-9]+]] = arith.cmpi eq, %[[V_2]], %c2{{.*}} : i32 ! CHECK: cf.cond_br %[[V_4]], ^bb3, ^bb4 ! CHECK: ^bb3: // pred: ^bb2 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_1]]) + ! CHECK: llvm.intr.stackrestore %[[V_1]] : !llvm.ptr ! CHECK: cf.br ^bb6 ! CHECK: ^bb4: // pred: ^bb2 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_1]]) + ! CHECK: llvm.intr.stackrestore %[[V_1]] : !llvm.ptr ! CHECK: fir.store %c0{{.*}} to %[[V_0]] : !fir.ref<i32> ! CHECK: cf.br ^bb7 ! CHECK: ^bb5: // pred: ^bb1 @@ -102,27 +102,27 @@ end ! CHECK-LABEL: func @_QPm3 function m3(index) ! CHECK: %[[V_0:[0-9]+]] = fir.alloca i32 {bindc_name = "m3" - ! CHECK: %[[V_1:[0-9]+]] = fir.call @llvm.stacksave.p0() + ! CHECK: %[[V_1:[0-9]+]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[V_2:[0-9]+]] = fir.load %arg0 : !fir.ref<i32> ! CHECK: %[[V_3:[0-9]+]] = arith.cmpi eq, %[[V_2]], %c1{{.*}} : i32 ! CHECK: cf.cond_br %[[V_3]], ^bb1, ^bb2 ! CHECK: ^bb1: // pred: ^bb0 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_1]]) + ! CHECK: llvm.intr.stackrestore %[[V_1]] : !llvm.ptr ! CHECK: cf.br ^bb7 ! CHECK: ^bb2: // pred: ^bb0 ! CHECK: %[[V_4:[0-9]+]] = arith.cmpi eq, %[[V_2]], %c2{{.*}} : i32 ! CHECK: cf.cond_br %[[V_4]], ^bb3, ^bb4 ! CHECK: ^bb3: // pred: ^bb2 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_1]]) + ! CHECK: llvm.intr.stackrestore %[[V_1]] : !llvm.ptr ! CHECK: cf.br ^bb8 ! CHECK: ^bb4: // pred: ^bb2 ! CHECK: %[[V_5:[0-9]+]] = arith.cmpi eq, %[[V_2]], %c3{{.*}} : i32 ! CHECK: cf.cond_br %[[V_5]], ^bb5, ^bb6 ! CHECK: ^bb5: // pred: ^bb4 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_1]]) + ! CHECK: llvm.intr.stackrestore %[[V_1]] : !llvm.ptr ! CHECK: cf.br ^bb9 ! CHECK: ^bb6: // pred: ^bb4 - ! CHECK: fir.call @llvm.stackrestore.p0(%[[V_1]]) + ! CHECK: llvm.intr.stackrestore %[[V_1]] : !llvm.ptr ! CHECK: fir.store %c0{{.*}} to %[[V_0]] : !fir.ref<i32> ! CHECK: cf.br ^bb10 ! 
CHECK: ^bb7: // pred: ^bb1 diff --git a/flang/test/Lower/dummy-procedure-character.f90 b/flang/test/Lower/dummy-procedure-character.f90 index 9a2710f..a9a8a5a 100644 --- a/flang/test/Lower/dummy-procedure-character.f90 +++ b/flang/test/Lower/dummy-procedure-character.f90 @@ -195,7 +195,7 @@ subroutine call_explicit_length_with_iface(bar10) ! CHECK: %[[C0:.*]] = arith.constant 0 : index ! CHECK: %[[COMPI:.*]] = arith.cmpi sgt, %[[VAL_5]], %[[C0]] : index ! CHECK: %[[SELECT:.*]] = arith.select %[[CMPI]], %[[VAL_5]], %[[C0]] : index -! CHECK: %[[VAL_6:.*]] = fir.call @llvm.stacksave.p0() {{.*}}: () -> !fir.ref<i8> +! CHECK: %[[VAL_6:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[VAL_7:.*]] = fir.alloca !fir.char<1,?>(%[[SELECT]] : index) {bindc_name = ".result"} ! CHECK: %[[VAL_8:.*]] = fir.convert %[[WAL_1]] : (() -> ()) -> ((!fir.ref<!fir.char<1,?>>, index, !fir.ref<i64>) -> !fir.boxchar<1>) ! CHECK: fir.call %[[VAL_8]](%[[VAL_7]], %[[SELECT]], %[[VAL_1]]) {{.*}}: (!fir.ref<!fir.char<1,?>>, index, !fir.ref<i64>) -> !fir.boxchar<1> diff --git a/flang/test/Lower/explicit-interface-results-2.f90 b/flang/test/Lower/explicit-interface-results-2.f90 index a63ee5f..95aee84 100644 --- a/flang/test/Lower/explicit-interface-results-2.f90 +++ b/flang/test/Lower/explicit-interface-results-2.f90 @@ -252,12 +252,12 @@ subroutine test_call_to_used_interface(dummy_proc) call takes_array(dummy_proc()) ! CHECK: %[[VAL_1:.*]] = arith.constant 100 : index ! CHECK: %[[VAL_2:.*]] = fir.alloca !fir.array<100xf32> {bindc_name = ".result"} -! CHECK: %[[VAL_3:.*]] = fir.call @llvm.stacksave.p0() {{.*}}: () -> !fir.ref<i8> +! CHECK: %[[VAL_3:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[VAL_4:.*]] = fir.shape %[[VAL_1]] : (index) -> !fir.shape<1> ! CHECK: %[[VAL_5:.*]] = fir.box_addr %[[VAL_0]] : (!fir.boxproc<() -> ()>) -> (() -> !fir.array<100xf32>) ! CHECK: %[[VAL_6:.*]] = fir.call %[[VAL_5]]() {{.*}}: () -> !fir.array<100xf32> ! CHECK: fir.save_result %[[VAL_6]] to %[[VAL_2]](%[[VAL_4]]) : !fir.array<100xf32>, !fir.ref<!fir.array<100xf32>>, !fir.shape<1> ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_2]] : (!fir.ref<!fir.array<100xf32>>) -> !fir.ref<!fir.array<?xf32>> ! CHECK: fir.call @_QPtakes_array(%[[VAL_7]]) {{.*}}: (!fir.ref<!fir.array<?xf32>>) -> () -! CHECK: fir.call @llvm.stackrestore.p0(%[[VAL_3]]) {{.*}}: (!fir.ref<i8>) -> () +! CHECK: llvm.intr.stackrestore %[[VAL_3]] : !llvm.ptr end subroutine diff --git a/flang/test/Lower/forall/array-constructor.f90 b/flang/test/Lower/forall/array-constructor.f90 index ad21ed3..4c8c756 100644 --- a/flang/test/Lower/forall/array-constructor.f90 +++ b/flang/test/Lower/forall/array-constructor.f90 @@ -232,7 +232,7 @@ end subroutine ac2 ! CHECK: %[[C0:.*]] = arith.constant 0 : index ! CHECK: %[[CMPI:.*]] = arith.cmpi sgt, %[[VAL_80]], %[[C0]] : index ! CHECK: %[[SELECT:.*]] = arith.select %[[CMPI]], %[[VAL_80]], %[[C0]] : index -! CHECK: %[[VAL_81:.*]] = fir.call @llvm.stacksave.p0() {{.*}}: () -> !fir.ref<i8> +! CHECK: %[[VAL_81:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[VAL_82:.*]] = fir.shape %[[SELECT]] : (index) -> !fir.shape<1> ! CHECK: %[[VAL_83:.*]] = fir.convert %[[VAL_74]] : (!fir.box<!fir.array<1xi32>>) -> !fir.box<!fir.array<?xi32>> ! CHECK: %[[VAL_84:.*]] = fir.call @_QFac2Pfunc(%[[VAL_83]]) {{.*}}: (!fir.box<!fir.array<?xi32>>) -> !fir.array<3xi32> @@ -250,7 +250,7 @@ end subroutine ac2 ! CHECK: %[[VAL_97:.*]] = fir.array_update %[[VAL_92]], %[[VAL_93]], %[[VAL_96]] : (!fir.array<?xi32>, i32, index) -> !fir.array<?xi32> ! 
CHECK: fir.result %[[VAL_97]] : !fir.array<?xi32> ! CHECK: } -! CHECK: fir.call @llvm.stackrestore.p0(%[[VAL_81]]) {{.*}}: (!fir.ref<i8>) -> () +! CHECK: llvm.intr.stackrestore %[[VAL_81]] : !llvm.ptr ! CHECK: fir.freemem %[[VAL_61]] : !fir.heap<!fir.array<1xi32>> ! CHECK: fir.freemem %[[VAL_57]] : !fir.heap<!fir.array<1xi32>> ! CHECK: fir.result %[[VAL_98:.*]] : !fir.array<?xi32> diff --git a/flang/test/Lower/host-associated-functions.f90 b/flang/test/Lower/host-associated-functions.f90 index d67a74f..6504584 100644 --- a/flang/test/Lower/host-associated-functions.f90 +++ b/flang/test/Lower/host-associated-functions.f90 @@ -36,7 +36,7 @@ contains ! CHECK: %[[C0:.*]] = arith.constant 0 : index ! CHECK: %[[CMPI:.*]] = arith.cmpi sgt, %[[VAL_16]], %[[C0]] : index ! CHECK: %[[SELECT:.*]] = arith.select %[[CMPI]], %[[VAL_16]], %[[C0]] : index - ! CHECK: %[[VAL_17:.*]] = fir.call @llvm.stacksave.p0() {{.*}}: () -> !fir.ref<i8> + ! CHECK: %[[VAL_17:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[VAL_18:.*]] = fir.alloca !fir.char<1,?>(%[[SELECT]] : index) {bindc_name = ".result"} ! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_13]] : (() -> ()) -> ((!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1>) ! CHECK: %[[VAL_20:.*]] = fir.call %[[VAL_19]](%[[VAL_18]], %[[SELECT]]) {{.*}}: (!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1> @@ -64,7 +64,7 @@ contains ! CHECK: %[[VAL_9:.*]] = fir.extract_value %[[VAL_3]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> !fir.boxproc<() -> ()> ! CHECK: %[[VAL_10:.*]] = fir.box_addr %[[VAL_9]] : (!fir.boxproc<() -> ()>) -> (() -> ()) ! CHECK: %[[VAL_11:.*]] = fir.extract_value %[[VAL_3]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> i64 -! CHECK: %[[VAL_12:.*]] = fir.call @llvm.stacksave.p0() {{.*}}: () -> !fir.ref<i8> +! CHECK: %[[VAL_12:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[VAL_13:.*]] = fir.alloca !fir.char<1,?>(%[[VAL_11]] : i64) {bindc_name = ".result"} ! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_10]] : (() -> ()) -> ((!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1>) ! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_11]] : (i64) -> index @@ -124,7 +124,7 @@ contains ! CHECK: %[[C0:.*]] = arith.constant 0 : index ! CHECK: %[[CMPI:.*]] = arith.cmpi sgt, %[[VAL_15]], %[[C0]] : index ! CHECK: %[[SELECT:.*]] = arith.select %[[CMPI]], %[[VAL_15]], %[[C0]] : index -! CHECK: %[[VAL_16:.*]] = fir.call @llvm.stacksave.p0() {{.*}}: () -> !fir.ref<i8> +! CHECK: %[[VAL_16:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[VAL_17:.*]] = fir.alloca !fir.array<?xi32>, %[[SELECT]] {bindc_name = ".result"} print *, array_func() end subroutine diff --git a/flang/test/Lower/host-associated.f90 b/flang/test/Lower/host-associated.f90 index b019510..67465f5 100644 --- a/flang/test/Lower/host-associated.f90 +++ b/flang/test/Lower/host-associated.f90 @@ -515,12 +515,12 @@ end subroutine test_proc_dummy_other ! CHECK: %[[VAL_35:.*]] = fir.undefined tuple<!fir.boxproc<() -> ()>, i64> ! CHECK: %[[VAL_36:.*]] = fir.insert_value %[[VAL_35]], %[[VAL_34]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, !fir.boxproc<() -> ()>) -> tuple<!fir.boxproc<() -> ()>, i64> ! CHECK: %[[VAL_37:.*]] = fir.insert_value %[[VAL_36]], %[[VAL_8]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, i64) -> tuple<!fir.boxproc<() -> ()>, i64> -! CHECK: %[[VAL_38:.*]] = fir.call @llvm.stacksave.p0() {{.*}}: () -> !fir.ref<i8> +! CHECK: %[[VAL_38:.*]] = llvm.intr.stacksave : !llvm.ptr ! 
CHECK: %[[VAL_39:.*]] = fir.call @_QPget_message(%[[VAL_11]], %[[VAL_9]], %[[VAL_37]]) {{.*}}: (!fir.ref<!fir.char<1,40>>, index, tuple<!fir.boxproc<() -> ()>, i64>) -> !fir.boxchar<1> ! CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_11]] : (!fir.ref<!fir.char<1,40>>) -> !fir.ref<i8> ! CHECK: %[[VAL_41:.*]] = fir.convert %[[VAL_9]] : (index) -> i64 ! CHECK: %[[VAL_42:.*]] = fir.call @_FortranAioOutputAscii(%[[VAL_32]], %[[VAL_40]], %[[VAL_41]]) {{.*}}: (!fir.ref<i8>, !fir.ref<i8>, i64) -> i1 -! CHECK: fir.call @llvm.stackrestore.p0(%[[VAL_38]]) {{.*}}: (!fir.ref<i8>) -> () +! CHECK: llvm.intr.stackrestore %[[VAL_38]] : !llvm.ptr ! CHECK: %[[VAL_43:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_32]]) {{.*}}: (!fir.ref<i8>) -> i32 ! CHECK: return ! CHECK: } @@ -577,7 +577,7 @@ end subroutine test_proc_dummy_other ! CHECK: %[[VAL_11:.*]] = fir.extract_value %[[VAL_2]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> !fir.boxproc<() -> ()> ! CHECK: %[[VAL_12:.*]] = fir.box_addr %[[VAL_11]] : (!fir.boxproc<() -> ()>) -> (() -> ()) ! CHECK: %[[VAL_13:.*]] = fir.extract_value %[[VAL_2]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> i64 -! CHECK: %[[VAL_14:.*]] = fir.call @llvm.stacksave.p0() {{.*}}: () -> !fir.ref<i8> +! CHECK: %[[VAL_14:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[VAL_15:.*]] = fir.alloca !fir.char<1,?>(%[[VAL_13]] : i64) {bindc_name = ".result"} ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_12]] : (() -> ()) -> ((!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1>) ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_13]] : (i64) -> index @@ -624,7 +624,7 @@ end subroutine test_proc_dummy_other ! CHECK: %[[VAL_48:.*]] = arith.subi %[[VAL_43]], %[[VAL_6]] : index ! CHECK: br ^bb4(%[[VAL_47]], %[[VAL_48]] : index, index) ! CHECK: ^bb6: -! CHECK: fir.call @llvm.stackrestore.p0(%[[VAL_14]]) {{.*}}: (!fir.ref<i8>) -> () +! CHECK: llvm.intr.stackrestore %[[VAL_14]] : !llvm.ptr ! CHECK: %[[VAL_49:.*]] = fir.emboxchar %[[VAL_0]], %[[VAL_3]] : (!fir.ref<!fir.char<1,40>>, index) -> !fir.boxchar<1> ! CHECK: return %[[VAL_49]] : !fir.boxchar<1> ! CHECK: } diff --git a/flang/test/Lower/implicit-call-mismatch.f90 b/flang/test/Lower/implicit-call-mismatch.f90 index afe6ad8..ca605d6 100644 --- a/flang/test/Lower/implicit-call-mismatch.f90 +++ b/flang/test/Lower/implicit-call-mismatch.f90 @@ -135,8 +135,7 @@ subroutine test_conversion_from_proc ! CHECK: %[[proc:.*]] = fir.address_of(@_QPproc) : () -> () ! CHECK: %[[convert:.*]] = fir.convert %[[proc]] : (() -> ()) -> !fir.ref<!fir.char<1,?>> - ! CHECK: %[[len:.*]] = fir.undefined index - ! CHECK: %[[box:.*]] = fir.emboxchar %[[convert]], %[[len]] : (!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1> + ! CHECK: %[[box:.*]] = fir.emboxchar %[[convert]], %c0{{.*}} : (!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1> ! CHECK: fir.call @_QPpass_char_to_proc(%[[box]]) call pass_char_to_proc(proc) diff --git a/flang/test/Lower/io-write.f90 b/flang/test/Lower/io-write.f90 index ee079eb..234fcda 100644 --- a/flang/test/Lower/io-write.f90 +++ b/flang/test/Lower/io-write.f90 @@ -18,7 +18,7 @@ ! CHECK: %[[Const_0:.*]] = arith.constant 0 : index ! CHECK: %[[Val_7:.*]] = arith.cmpi sgt, %[[Val_6]], %[[Const_0]] : index ! CHECK: %[[Val_8:.*]] = arith.select %[[Val_7]], %[[Val_6]], %[[Const_0]] : index -! CHECK: %[[Val_9:.*]] = fir.call @llvm.stacksave.p0() {{.*}}: () -> !fir.ref<i8> +! CHECK: %[[Val_9:.*]] = llvm.intr.stacksave : !llvm.ptr ! CHECK: %[[Val_10:.*]] = fir.alloca !fir.char<1,?>(%[[Val_8]] : index) {bindc_name = ".result"} ! 
CHECK: %[[Val_11:.*]] = fir.call @_QFPgetstring(%[[Val_10]], %[[Val_8]], %[[Val_0]]) {{.*}}: (!fir.ref<!fir.char<1,?>>, index, !fir.ref<i32>) -> !fir.boxchar<1> ! CHECK: %[[Val_12:.*]] = fir.convert %[[Val_10]] : (!fir.ref<!fir.char<1,?>>) -> !fir.ref<i8> @@ -32,7 +32,7 @@ ! CHECK: %[[Val_18:.*]] = fir.call @_FortranAioBeginInternalFormattedOutput(%[[Val_2]], %[[Val_3]], %[[Val_12]], %[[Val_13]], ! %[[Val_14]], %[[Val_15]], %[[Const_0_i64_0]], %17, %{{.*}}) : (!fir.ref<i8>, i64, !fir.ref<i8>, i64, !fir.box<none>, !fir.ref<!fir.llvm_ptr<i8>>, i64, !fir.ref<i8>, i32) -> !fir.ref<i8> ! CHECK: %[[Val_19:.*]] = fir.call @_FortranAioEndIoStatement(%18) {{.*}}: (!fir.ref<i8>) -> i32 -! CHECK: fir.call @llvm.stackrestore.p0(%[[Val_9]]) {{.*}}: (!fir.ref<i8>) -> () +! CHECK: llvm.intr.stackrestore %[[Val_9]] : !llvm.ptr if (string/="hi") stop 'FAIL' contains function getstring(n) result(r) diff --git a/flang/test/Semantics/OpenMP/combined-constructs.f90 b/flang/test/Semantics/OpenMP/combined-constructs.f90 index 35ab6fc..b7a3848 100644 --- a/flang/test/Semantics/OpenMP/combined-constructs.f90 +++ b/flang/test/Semantics/OpenMP/combined-constructs.f90 @@ -100,6 +100,7 @@ program main enddo !$omp end target parallel do + !ERROR: COPYIN clause is not allowed on the TARGET PARALLEL DO directive !ERROR: Non-THREADPRIVATE object 'a' in COPYIN clause !$omp target parallel do copyin(a) do i = 1, N diff --git a/flang/test/Semantics/OpenMP/ordered03.f90 b/flang/test/Semantics/OpenMP/ordered03.f90 index 8dd4d03..18f85fc 100644 --- a/flang/test/Semantics/OpenMP/ordered03.f90 +++ b/flang/test/Semantics/OpenMP/ordered03.f90 @@ -52,6 +52,7 @@ subroutine sub1() end do !$omp end target parallel do + !ERROR: ORDERED clause is not allowed on the TARGET TEAMS DISTRIBUTE PARALLEL DO directive !$omp target teams distribute parallel do ordered(1) do i = 1, N !ERROR: An ORDERED construct with the DEPEND clause must be closely nested in a worksharing-loop (or parallel worksharing-loop) construct with ORDERED clause with a parameter diff --git a/flang/test/Transforms/stack-arrays.fir b/flang/test/Transforms/stack-arrays.fir index 45c22c1..66cd2a5 100644 --- a/flang/test/Transforms/stack-arrays.fir +++ b/flang/test/Transforms/stack-arrays.fir @@ -174,9 +174,9 @@ func.func @placement3() { // CHECK-NEXT: %[[C10:.*]] = arith.constant 10 : index // CHECK-NEXT: fir.do_loop // CHECK-NEXT: %[[SUM:.*]] = arith.addi %[[C1]], %[[C2]] : index -// CHECK-NEXT: %[[SP:.*]] = fir.call @llvm.stacksave.p0() : () -> !fir.ref<i8> +// CHECK-NEXT: %[[SP:.*]] = llvm.intr.stacksave : !llvm.ptr // CHECK-NEXT: %[[MEM:.*]] = fir.alloca !fir.array<?xi32>, %[[SUM]] -// CHECK-NEXT: fir.call @llvm.stackrestore.p0(%[[SP]]) +// CHECK-NEXT: llvm.intr.stackrestore %[[SP]] : !llvm.ptr // CHECK-NEXT: fir.result // CHECK-NEXT: } // CHECK-NEXT: return @@ -206,9 +206,9 @@ func.func @placement4(%arg0 : i1) { // CHECK-NEXT: cf.br ^bb1 // CHECK-NEXT: ^bb1: // CHECK-NEXT: %[[C3:.*]] = arith.constant 3 : index -// CHECK-NEXT: %[[SP:.*]] = fir.call @llvm.stacksave.p0() : () -> !fir.ref<i8> +// CHECK-NEXT: %[[SP:.*]] = llvm.intr.stacksave : !llvm.ptr // CHECK-NEXT: %[[MEM:.*]] = fir.alloca !fir.array<?xi32>, %[[C3]] -// CHECK-NEXT: fir.call @llvm.stackrestore.p0(%[[SP]]) : (!fir.ref<i8>) -> () +// CHECK-NEXT: llvm.intr.stackrestore %[[SP]] : !llvm.ptr // CHECK-NEXT: cf.cond_br %arg0, ^bb1, ^bb2 // CHECK-NEXT: ^bb2: // CHECK-NEXT: return @@ -393,9 +393,9 @@ func.func @placement_loop_declare() { // CHECK-NEXT: %[[C10:.*]] = arith.constant 10 : index // CHECK-NEXT: 
fir.do_loop // CHECK-NEXT: %[[SUM:.*]] = arith.addi %[[C1]], %[[C2]] : index -// CHECK-NEXT: %[[SP:.*]] = fir.call @llvm.stacksave.p0() : () -> !fir.ref<i8> +// CHECK-NEXT: %[[SP:.*]] = llvm.intr.stacksave : !llvm.ptr // CHECK-NEXT: %[[MEM:.*]] = fir.alloca !fir.array<?xi32>, %[[SUM]] -// CHECK: fir.call @llvm.stackrestore.p0(%[[SP]]) +// CHECK: llvm.intr.stackrestore %[[SP]] : !llvm.ptr // CHECK-NEXT: fir.result // CHECK-NEXT: } // CHECK-NEXT: return diff --git a/libc/cmake/modules/LLVMLibCHeaderRules.cmake b/libc/cmake/modules/LLVMLibCHeaderRules.cmake index c2c675b..76c4e1f 100644 --- a/libc/cmake/modules/LLVMLibCHeaderRules.cmake +++ b/libc/cmake/modules/LLVMLibCHeaderRules.cmake @@ -118,7 +118,7 @@ function(add_gen_header2 target_name) ${entry_points} --output_dir ${out_file} DEPENDS ${yaml_file} ${def_file} ${fq_data_files} - COMMENT "Generating header ${ADD_GEN_HDR2_GE2N_HDR} from ${yaml_file} and ${def_file}" + COMMENT "Generating header ${ADD_GEN_HDR2_GEN_HDR} from ${yaml_file} and ${def_file}" ) if(LIBC_TARGET_OS_IS_GPU) file(MAKE_DIRECTORY ${LIBC_INCLUDE_DIR}/llvm-libc-decls) @@ -135,7 +135,7 @@ function(add_gen_header2 target_name) DEPENDS ${yaml_file} ${fq_data_files} ) endif() - + if(ADD_GEN_HDR2_DEPENDS) get_fq_deps_list(fq_deps_list ${ADD_GEN_HDR2_DEPENDS}) # Dependencies of a add_header target can only be another add_gen_header target diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt index 350072f..5a1ee3b 100644 --- a/libc/src/math/generic/CMakeLists.txt +++ b/libc/src/math/generic/CMakeLists.txt @@ -2958,7 +2958,9 @@ add_entrypoint_object( HDRS ../fmul.h DEPENDS - libc.src.__support.FPUtil.generic.mul + libc.hdr.errno_macros + libc.hdr.fenv_macros + libc.src.__support.FPUtil.double_double COMPILE_OPTIONS -O3 ) diff --git a/libc/src/math/generic/fmul.cpp b/libc/src/math/generic/fmul.cpp index 64c27d6..e759e48 100644 --- a/libc/src/math/generic/fmul.cpp +++ b/libc/src/math/generic/fmul.cpp @@ -5,8 +5,10 @@ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// - #include "src/math/fmul.h" +#include "hdr/errno_macros.h" +#include "hdr/fenv_macros.h" +#include "src/__support/FPUtil/double_double.h" #include "src/__support/FPUtil/generic/mul.h" #include "src/__support/common.h" #include "src/__support/macros/config.h" @@ -14,7 +16,104 @@ namespace LIBC_NAMESPACE_DECL { LLVM_LIBC_FUNCTION(float, fmul, (double x, double y)) { + + // Without FMA instructions, fputil::exact_mult is not + // correctly rounded for all rounding modes, so we fall + // back to the generic `fmul` implementation + +#ifndef LIBC_TARGET_CPU_HAS_FMA return fputil::generic::mul<float>(x, y); -} +#else + fputil::DoubleDouble prod = fputil::exact_mult(x, y); + using DoubleBits = fputil::FPBits<double>; + using DoubleStorageType = typename DoubleBits::StorageType; + using FloatBits = fputil::FPBits<float>; + using FloatStorageType = typename FloatBits::StorageType; + DoubleBits x_bits(x); + DoubleBits y_bits(y); + + Sign result_sign = x_bits.sign() == y_bits.sign() ? 
Sign::POS : Sign::NEG; + double result = prod.hi; + DoubleBits hi_bits(prod.hi), lo_bits(prod.lo); + // Check for cases where we need to propagate the sticky bits: + constexpr uint64_t STICKY_MASK = 0xFFF'FFF; // Lower (52 - 23 - 1 = 28 bits) + uint64_t sticky_bits = (hi_bits.uintval() & STICKY_MASK); + if (LIBC_UNLIKELY(sticky_bits == 0)) { + // Might need to propagate sticky bits: + if (!(lo_bits.is_inf_or_nan() || lo_bits.is_zero())) { + // Now prod.lo is nonzero and finite, we need to propagate sticky bits. + if (lo_bits.sign() != hi_bits.sign()) + result = DoubleBits(hi_bits.uintval() - 1).get_val(); + else + result = DoubleBits(hi_bits.uintval() | 1).get_val(); + } + } + + float result_f = static_cast<float>(result); + FloatBits rf_bits(result_f); + uint32_t rf_exp = rf_bits.get_biased_exponent(); + if (LIBC_LIKELY(rf_exp > 0 && rf_exp < 2 * FloatBits::EXP_BIAS + 1)) { + return result_f; + } + + // Now result_f is either inf/nan/zero/denormal. + if (x_bits.is_nan() || y_bits.is_nan()) { + if (x_bits.is_signaling_nan() || y_bits.is_signaling_nan()) + fputil::raise_except_if_required(FE_INVALID); + + if (x_bits.is_quiet_nan()) { + DoubleStorageType x_payload = x_bits.get_mantissa(); + x_payload >>= DoubleBits::FRACTION_LEN - FloatBits::FRACTION_LEN; + return FloatBits::quiet_nan(x_bits.sign(), + static_cast<FloatStorageType>(x_payload)) + .get_val(); + } + + if (y_bits.is_quiet_nan()) { + DoubleStorageType y_payload = y_bits.get_mantissa(); + y_payload >>= DoubleBits::FRACTION_LEN - FloatBits::FRACTION_LEN; + return FloatBits::quiet_nan(y_bits.sign(), + static_cast<FloatStorageType>(y_payload)) + .get_val(); + } + + return FloatBits::quiet_nan().get_val(); + } + if (x_bits.is_inf()) { + if (y_bits.is_zero()) { + fputil::set_errno_if_required(EDOM); + fputil::raise_except_if_required(FE_INVALID); + + return FloatBits::quiet_nan().get_val(); + } + + return FloatBits::inf(result_sign).get_val(); + } + + if (y_bits.is_inf()) { + if (x_bits.is_zero()) { + fputil::set_errno_if_required(EDOM); + fputil::raise_except_if_required(FE_INVALID); + return FloatBits::quiet_nan().get_val(); + } + + return FloatBits::inf(result_sign).get_val(); + } + + // Now either x or y is zero, and the other one is finite. 
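Stepping back to the sticky-bit handling near the top of this function (editorial sketch; `foldSticky` is a made-up name): when every bit of `prod.hi` below the float rounding point is zero, the discarded `prod.lo` alone decides the rounding direction, so its presence is folded into the last kept bit of `prod.hi` (a round-to-odd step) before the double-to-float narrowing:

    #include <cstdint>

    // If the 28 bits of the high product below the float rounding point are
    // all zero, fold a nonzero, finite low product into the sticky position:
    // back off by one when it pulls the value down, otherwise set the odd bit.
    uint64_t foldSticky(uint64_t hiBits, bool loNonZeroFinite,
                        bool loOppositeSign) {
      constexpr uint64_t STICKY_MASK = 0xFFF'FFF; // 52 - 23 - 1 = 28 bits
      if ((hiBits & STICKY_MASK) == 0 && loNonZeroFinite)
        return loOppositeSign ? hiBits - 1 : hiBits | 1;
      return hiBits;
    }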
+ if (rf_bits.is_inf()) { + fputil::set_errno_if_required(ERANGE); + return FloatBits::inf(result_sign).get_val(); + } + + if (x_bits.is_zero() || y_bits.is_zero()) + return FloatBits::zero(result_sign).get_val(); + + fputil::set_errno_if_required(ERANGE); + fputil::raise_except_if_required(FE_UNDERFLOW); + return result_f; + +#endif +} } // namespace LIBC_NAMESPACE_DECL diff --git a/libc/test/src/math/fmul_test.cpp b/libc/test/src/math/fmul_test.cpp index 3f6df66..488e087 100644 --- a/libc/test/src/math/fmul_test.cpp +++ b/libc/test/src/math/fmul_test.cpp @@ -10,4 +10,27 @@ #include "src/math/fmul.h" +#include "test/UnitTest/Test.h" +#include "utils/MPFRWrapper/MPFRUtils.h" + LIST_MUL_TESTS(float, double, LIBC_NAMESPACE::fmul) + +TEST_F(LlvmLibcMulTest, SpecialInputs) { + namespace mpfr = LIBC_NAMESPACE::testing::mpfr; + double INPUTS[][2] = { + {0x1.0100010002p8, 0x1.fffcp14}, + {0x1.000000b92144p-7, 0x1.62p7}, + }; + + for (size_t i = 0; i < 2; ++i) { + double a = INPUTS[i][0]; + + for (int j = 0; j < 180; ++j) { + a *= 0.5; + mpfr::BinaryInput<double> input{a, INPUTS[i][1]}; + ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Mul, input, + LIBC_NAMESPACE::fmul(a, INPUTS[i][1]), + 0.5); + } + } +} diff --git a/libc/test/src/math/performance_testing/CMakeLists.txt b/libc/test/src/math/performance_testing/CMakeLists.txt index ed1b03f..60c074a 100644 --- a/libc/test/src/math/performance_testing/CMakeLists.txt +++ b/libc/test/src/math/performance_testing/CMakeLists.txt @@ -484,6 +484,8 @@ add_perf_binary( DEPENDS .binary_op_single_output_diff libc.src.math.fmul + libc.src.__support.FPUtil.generic.mul + libc.src.__support.FPUtil.fp_bits COMPILE_OPTIONS -fno-builtin ) diff --git a/libc/test/src/math/performance_testing/fmul_perf.cpp b/libc/test/src/math/performance_testing/fmul_perf.cpp index a215405..f15cfaf 100644 --- a/libc/test/src/math/performance_testing/fmul_perf.cpp +++ b/libc/test/src/math/performance_testing/fmul_perf.cpp @@ -7,12 +7,13 @@ //===----------------------------------------------------------------------===// #include "BinaryOpSingleOutputPerf.h" +#include "src/__support/FPUtil/generic/mul.h" #include "src/math/fmul.h" static constexpr size_t DOUBLE_ROUNDS = 40; float fmul_placeholder_binary(double x, double y) { - return static_cast<float>(x * y); + return LIBC_NAMESPACE::fputil::generic::mul<float>(x, y); } int main() { diff --git a/libc/test/src/math/smoke/fmul_test.cpp b/libc/test/src/math/smoke/fmul_test.cpp index 3f6df66..3fcf514 100644 --- a/libc/test/src/math/smoke/fmul_test.cpp +++ b/libc/test/src/math/smoke/fmul_test.cpp @@ -11,3 +11,22 @@ #include "src/math/fmul.h" LIST_MUL_TESTS(float, double, LIBC_NAMESPACE::fmul) + +TEST_F(LlvmLibcMulTest, SpecialInputs) { + constexpr double INPUTS[][2] = { + {0x1.0100010002p8, 0x1.fffcp14}, + {0x1.000000b92144p-7, 0x1.62p7}, + }; + + constexpr float RESULTS[] = { + 0x1.00fdfep+23f, + 0x1.620002p0f, + }; + + constexpr size_t N = sizeof(RESULTS) / sizeof(RESULTS[0]); + + for (size_t i = 0; i < N; ++i) { + float result = LIBC_NAMESPACE::fmul(INPUTS[i][0], INPUTS[i][1]); + EXPECT_FP_EQ(RESULTS[i], result); + } +} diff --git a/libcxx/docs/ReleaseNotes/20.rst b/libcxx/docs/ReleaseNotes/20.rst index aecac66..82c8286 100644 --- a/libcxx/docs/ReleaseNotes/20.rst +++ b/libcxx/docs/ReleaseNotes/20.rst @@ -43,16 +43,21 @@ Implemented Papers - P2985R0: A type trait for detecting virtual base classes (`Github <https://github.com/llvm/llvm-project/issues/105432>`__) - ``std::jthread`` and ``<stop_token>`` are not guarded behind 
``-fexperimental-library`` anymore - Improvements and New Features ----------------------------- - The ``lexicographical_compare`` and ``ranges::lexicographical_compare`` algorithms have been optimized for trivially equality comparable types, resulting in a performance improvement of up to 40x. +- The ``_LIBCPP_ENABLE_CXX20_REMOVED_TEMPORARY_BUFFER`` macro has been added to make ``std::get_temporary_buffer`` and + ``std::return_temporary_buffer`` available. + - The ``_LIBCPP_ENABLE_CXX20_REMOVED_UNCAUGHT_EXCEPTION`` macro has been added to make ``std::uncaught_exception`` available in C++20 and later modes. +- The internal structure ``__compressed_pair`` has been replaced with ``[[no_unique_address]]``, resulting in reduced + compile times and smaller debug information as well as better code generation if optimizations are disabled. + The Chromium project measured a 5% reduction in object file and debug information size. Deprecations and Removals ------------------------- @@ -97,8 +102,22 @@ LLVM 21 ABI Affecting Changes --------------------- -- TODO - +- The ABI breaks for removing undefined behaviour in ``std::forward_list``, ``std::list``, ``std::map``, ``std::set``, + ``std::multimap``, ``std::multiset``, ``std::unordered_map``, ``std::unordered_set``, ``std::unordered_multimap`` and + ``std::unordered_multiset`` are now applied unconditionally. This only affects fancy pointers which have a different + value representation when pointing at the base of an internal node type instead of the type itself. A size or + alignment difference is diagnosed, but more subtle ABI breaks may result in unexpected behaviour. + +- The internal structure ``__compressed_pair`` has been replaced with ``[[no_unique_address]]``. The ABI impact is: + + - When using the Itanium ABI (most non-MSVC platforms), empty types are now placed at the beginning of the enclosing + object instead of where the beginning of the ``__compressed_pair`` subobject was. This is only observable by + checking the address of the empty allocator, equality comparator or hasher. + - Additionally, using an overaligned empty type as an allocator, comparator or hasher in the associative containers + (and only those containers) may result in the container's object size and data layout changing beyond only + the address of the empty member. + - When using the MSVC ABI, this change results in some classes having a completely different memory layout, so this is + a genuine ABI break. However, the library does not currently guarantee ABI stability on MSVC platforms. Build System Changes -------------------- diff --git a/libcxx/docs/Status/Cxx20Papers.csv b/libcxx/docs/Status/Cxx20Papers.csv index d449c9d..4a4d75b 100644 --- a/libcxx/docs/Status/Cxx20Papers.csv +++ b/libcxx/docs/Status/Cxx20Papers.csv @@ -34,7 +34,7 @@ "`P0528R3 <https://wg21.link/P0528R3>`__","The Curious Case of Padding Bits, Featuring Atomic Compare-and-Exchange","2018-06 (Rapperswil)","","","" "`P0542R5 <https://wg21.link/P0542R5>`__","Support for contract based programming in C++","2018-06 (Rapperswil)","|Nothing To Do|","n/a","Pulled at the 2019-07 meeting in Cologne" "`P0556R3 <https://wg21.link/P0556R3>`__","Integral power-of-2 operations","2018-06 (Rapperswil)","|Complete|","9.0","" -"`P0619R4 <https://wg21.link/P0619R4>`__","Reviewing Deprecated Facilities of C++17 for C++20","2018-06 (Rapperswil)","|Partial|","","Only sections D.7, D.8, D.9, D.10, D.11 and D.13 are implemented. Sections D.4 and D.12 remain undone."
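Returning to the ``__compressed_pair`` item in the ABI notes above, a minimal sketch (hypothetical stand-in type, not libc++'s actual container layout) shows what the new representation looks like and why only address inspection can observe the difference on Itanium targets:

#include <memory>

struct VecRep { // stand-in for a container's internal state
  int* begin_ = nullptr;
  int* end_ = nullptr;
  [[no_unique_address]] std::allocator<int> alloc_; // empty, occupies no storage
};

// Holds on Itanium-ABI targets; MSVC ignores [[no_unique_address]],
// which is why the MSVC bullet above is a genuine ABI break.
static_assert(sizeof(VecRep) == 2 * sizeof(int*), "empty member adds no size");

On Itanium targets the empty alloc_ is laid out like an empty base, at offset 0 of VecRep, whereas the old __compressed_pair placed it at the pair subobject's offset; the object size is unchanged, only the empty member's address moves.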
+"`P0619R4 <https://wg21.link/P0619R4>`__","Reviewing Deprecated Facilities of C++17 for C++20","2018-06 (Rapperswil)","|Partial|","","Only sections D.7, D.8, D.9, D.10, D.11, D.12, and D.13 are implemented. Section D.4 remains undone." "`P0646R1 <https://wg21.link/P0646R1>`__","Improving the Return Value of Erase-Like Algorithms","2018-06 (Rapperswil)","|Complete|","10.0","" "`P0722R3 <https://wg21.link/P0722R3>`__","Efficient sized delete for variable sized classes","2018-06 (Rapperswil)","|Complete|","9.0","" "`P0758R1 <https://wg21.link/P0758R1>`__","Implicit conversion traits and utility functions","2018-06 (Rapperswil)","|Complete|","","" diff --git a/libcxx/docs/UserDocumentation.rst b/libcxx/docs/UserDocumentation.rst index 3651e52..6659fa5 100644 --- a/libcxx/docs/UserDocumentation.rst +++ b/libcxx/docs/UserDocumentation.rst @@ -181,6 +181,9 @@ C++20 Specific Configuration Macros **_LIBCPP_ENABLE_CXX20_REMOVED_RAW_STORAGE_ITERATOR**: This macro is used to re-enable `raw_storage_iterator`. +**_LIBCPP_ENABLE_CXX20_REMOVED_TEMPORARY_BUFFER**: + This macro is used to re-enable `get_temporary_buffer` and `return_temporary_buffer`. + **_LIBCPP_ENABLE_CXX20_REMOVED_TYPE_TRAITS**: This macro is used to re-enable `is_literal_type`, `is_literal_type_v`, `result_of` and `result_of_t`. diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt index 23d9aa0..8c61009 100644 --- a/libcxx/include/CMakeLists.txt +++ b/libcxx/include/CMakeLists.txt @@ -556,6 +556,7 @@ set(files __memory/temporary_buffer.h __memory/uninitialized_algorithms.h __memory/unique_ptr.h + __memory/unique_temporary_buffer.h __memory/uses_allocator.h __memory/uses_allocator_construction.h __memory/voidify.h diff --git a/libcxx/include/__algorithm/inplace_merge.h b/libcxx/include/__algorithm/inplace_merge.h index a6bcc66..2133f44 100644 --- a/libcxx/include/__algorithm/inplace_merge.h +++ b/libcxx/include/__algorithm/inplace_merge.h @@ -24,8 +24,8 @@ #include <__iterator/iterator_traits.h> #include <__iterator/reverse_iterator.h> #include <__memory/destruct_n.h> -#include <__memory/temporary_buffer.h> #include <__memory/unique_ptr.h> +#include <__memory/unique_temporary_buffer.h> #include <__utility/pair.h> #include <new> @@ -208,16 +208,19 @@ _LIBCPP_HIDE_FROM_ABI void __inplace_merge( _BidirectionalIterator __first, _BidirectionalIterator __middle, _BidirectionalIterator __last, _Compare&& __comp) { typedef typename iterator_traits<_BidirectionalIterator>::value_type value_type; typedef typename iterator_traits<_BidirectionalIterator>::difference_type difference_type; - difference_type __len1 = _IterOps<_AlgPolicy>::distance(__first, __middle); - difference_type __len2 = _IterOps<_AlgPolicy>::distance(__middle, __last); - difference_type __buf_size = std::min(__len1, __len2); - // TODO: Remove the use of std::get_temporary_buffer - _LIBCPP_SUPPRESS_DEPRECATED_PUSH - pair<value_type*, ptrdiff_t> __buf = std::get_temporary_buffer<value_type>(__buf_size); - _LIBCPP_SUPPRESS_DEPRECATED_POP - unique_ptr<value_type, __return_temporary_buffer> __h(__buf.first); + difference_type __len1 = _IterOps<_AlgPolicy>::distance(__first, __middle); + difference_type __len2 = _IterOps<_AlgPolicy>::distance(__middle, __last); + difference_type __buf_size = std::min(__len1, __len2); + __unique_temporary_buffer<value_type> __unique_buf = std::__allocate_unique_temporary_buffer<value_type>(__buf_size); return std::__inplace_merge<_AlgPolicy>( - std::move(__first), std::move(__middle), std::move(__last), __comp, __len1, __len2, 
__buf.first, __buf.second); + std::move(__first), + std::move(__middle), + std::move(__last), + __comp, + __len1, + __len2, + __unique_buf.get(), + __unique_buf.get_deleter().__count_); } template <class _BidirectionalIterator, class _Compare> diff --git a/libcxx/include/__algorithm/stable_partition.h b/libcxx/include/__algorithm/stable_partition.h index 8bb1eaf..bf86201 100644 --- a/libcxx/include/__algorithm/stable_partition.h +++ b/libcxx/include/__algorithm/stable_partition.h @@ -16,8 +16,8 @@ #include <__iterator/distance.h> #include <__iterator/iterator_traits.h> #include <__memory/destruct_n.h> -#include <__memory/temporary_buffer.h> #include <__memory/unique_ptr.h> +#include <__memory/unique_temporary_buffer.h> #include <__utility/move.h> #include <__utility/pair.h> #include <new> @@ -132,14 +132,12 @@ __stable_partition_impl(_ForwardIterator __first, _ForwardIterator __last, _Pred // We now have a reduced range [__first, __last) // *__first is known to be false difference_type __len = _IterOps<_AlgPolicy>::distance(__first, __last); + __unique_temporary_buffer<value_type> __unique_buf; pair<value_type*, ptrdiff_t> __p(0, 0); - unique_ptr<value_type, __return_temporary_buffer> __h; if (__len >= __alloc_limit) { - // TODO: Remove the use of std::get_temporary_buffer - _LIBCPP_SUPPRESS_DEPRECATED_PUSH - __p = std::get_temporary_buffer<value_type>(__len); - _LIBCPP_SUPPRESS_DEPRECATED_POP - __h.reset(__p.first); + __unique_buf = std::__allocate_unique_temporary_buffer<value_type>(__len); + __p.first = __unique_buf.get(); + __p.second = __unique_buf.get_deleter().__count_; } return std::__stable_partition_impl<_AlgPolicy, _Predicate&>( std::move(__first), std::move(__last), __pred, __len, __p, forward_iterator_tag()); @@ -272,14 +270,12 @@ _LIBCPP_HIDE_FROM_ABI _BidirectionalIterator __stable_partition_impl( // *__last is known to be true // __len >= 2 difference_type __len = _IterOps<_AlgPolicy>::distance(__first, __last) + 1; + __unique_temporary_buffer<value_type> __unique_buf; pair<value_type*, ptrdiff_t> __p(0, 0); - unique_ptr<value_type, __return_temporary_buffer> __h; if (__len >= __alloc_limit) { - // TODO: Remove the use of std::get_temporary_buffer - _LIBCPP_SUPPRESS_DEPRECATED_PUSH - __p = std::get_temporary_buffer<value_type>(__len); - _LIBCPP_SUPPRESS_DEPRECATED_POP - __h.reset(__p.first); + __unique_buf = std::__allocate_unique_temporary_buffer<value_type>(__len); + __p.first = __unique_buf.get(); + __p.second = __unique_buf.get_deleter().__count_; } return std::__stable_partition_impl<_AlgPolicy, _Predicate&>( std::move(__first), std::move(__last), __pred, __len, __p, bidirectional_iterator_tag()); diff --git a/libcxx/include/__algorithm/stable_sort.h b/libcxx/include/__algorithm/stable_sort.h index 726e7e16..ec556aa 100644 --- a/libcxx/include/__algorithm/stable_sort.h +++ b/libcxx/include/__algorithm/stable_sort.h @@ -18,8 +18,8 @@ #include <__debug_utils/strict_weak_ordering_check.h> #include <__iterator/iterator_traits.h> #include <__memory/destruct_n.h> -#include <__memory/temporary_buffer.h> #include <__memory/unique_ptr.h> +#include <__memory/unique_temporary_buffer.h> #include <__type_traits/is_trivially_assignable.h> #include <__utility/move.h> #include <__utility/pair.h> @@ -241,14 +241,12 @@ __stable_sort_impl(_RandomAccessIterator __first, _RandomAccessIterator __last, using difference_type = typename iterator_traits<_RandomAccessIterator>::difference_type; difference_type __len = __last - __first; + __unique_temporary_buffer<value_type> __unique_buf; 
pair<value_type*, ptrdiff_t> __buf(0, 0); - unique_ptr<value_type, __return_temporary_buffer> __h; if (__len > static_cast<difference_type>(__stable_sort_switch<value_type>::value)) { - // TODO: Remove the use of std::get_temporary_buffer - _LIBCPP_SUPPRESS_DEPRECATED_PUSH - __buf = std::get_temporary_buffer<value_type>(__len); - _LIBCPP_SUPPRESS_DEPRECATED_POP - __h.reset(__buf.first); + __unique_buf = std::__allocate_unique_temporary_buffer<value_type>(__len); + __buf.first = __unique_buf.get(); + __buf.second = __unique_buf.get_deleter().__count_; } std::__stable_sort<_AlgPolicy, __comp_ref_type<_Compare> >(__first, __last, __comp, __len, __buf.first, __buf.second); diff --git a/libcxx/include/__chrono/formatter.h b/libcxx/include/__chrono/formatter.h index 449c415..ff2593f 100644 --- a/libcxx/include/__chrono/formatter.h +++ b/libcxx/include/__chrono/formatter.h @@ -10,55 +10,58 @@ #ifndef _LIBCPP___CHRONO_FORMATTER_H #define _LIBCPP___CHRONO_FORMATTER_H -#include <__algorithm/ranges_copy.h> -#include <__chrono/calendar.h> -#include <__chrono/concepts.h> -#include <__chrono/convert_to_tm.h> -#include <__chrono/day.h> -#include <__chrono/duration.h> -#include <__chrono/file_clock.h> -#include <__chrono/hh_mm_ss.h> -#include <__chrono/local_info.h> -#include <__chrono/month.h> -#include <__chrono/month_weekday.h> -#include <__chrono/monthday.h> -#include <__chrono/ostream.h> -#include <__chrono/parser_std_format_spec.h> -#include <__chrono/statically_widen.h> -#include <__chrono/sys_info.h> -#include <__chrono/system_clock.h> -#include <__chrono/time_point.h> -#include <__chrono/weekday.h> -#include <__chrono/year.h> -#include <__chrono/year_month.h> -#include <__chrono/year_month_day.h> -#include <__chrono/year_month_weekday.h> -#include <__chrono/zoned_time.h> -#include <__concepts/arithmetic.h> -#include <__concepts/same_as.h> #include <__config> -#include <__format/concepts.h> -#include <__format/format_error.h> -#include <__format/format_functions.h> -#include <__format/format_parse_context.h> -#include <__format/formatter.h> -#include <__format/parser_std_format_spec.h> -#include <__format/write_escaped.h> -#include <__memory/addressof.h> -#include <__type_traits/is_specialization.h> -#include <cmath> -#include <ctime> -#include <limits> -#include <sstream> -#include <string_view> - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif + +#ifndef _LIBCPP_HAS_NO_LOCALIZATION + +# include <__algorithm/ranges_copy.h> +# include <__chrono/calendar.h> +# include <__chrono/concepts.h> +# include <__chrono/convert_to_tm.h> +# include <__chrono/day.h> +# include <__chrono/duration.h> +# include <__chrono/file_clock.h> +# include <__chrono/hh_mm_ss.h> +# include <__chrono/local_info.h> +# include <__chrono/month.h> +# include <__chrono/month_weekday.h> +# include <__chrono/monthday.h> +# include <__chrono/ostream.h> +# include <__chrono/parser_std_format_spec.h> +# include <__chrono/statically_widen.h> +# include <__chrono/sys_info.h> +# include <__chrono/system_clock.h> +# include <__chrono/time_point.h> +# include <__chrono/weekday.h> +# include <__chrono/year.h> +# include <__chrono/year_month.h> +# include <__chrono/year_month_day.h> +# include <__chrono/year_month_weekday.h> +# include <__chrono/zoned_time.h> +# include <__concepts/arithmetic.h> +# include <__concepts/same_as.h> +# include <__format/concepts.h> +# include <__format/format_error.h> +# include <__format/format_functions.h> +# include <__format/format_parse_context.h> +# include 
<__format/formatter.h> +# include <__format/parser_std_format_spec.h> +# include <__format/write_escaped.h> +# include <__memory/addressof.h> +# include <__type_traits/is_specialization.h> +# include <cmath> +# include <ctime> +# include <limits> +# include <sstream> +# include <string_view> + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif _LIBCPP_BEGIN_NAMESPACE_STD -#if _LIBCPP_STD_VER >= 20 +# if _LIBCPP_STD_VER >= 20 namespace __formatter { @@ -139,24 +142,24 @@ __format_sub_seconds(basic_stringstream<_CharT>& __sstr, const chrono::hh_mm_ss< __value.fractional_width); } -# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && \ - !defined(_LIBCPP_HAS_NO_FILESYSTEM) && !defined(_LIBCPP_HAS_NO_LOCALIZATION) +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && \ + !defined(_LIBCPP_HAS_NO_FILESYSTEM) template <class _CharT, class _Duration, class _TimeZonePtr> _LIBCPP_HIDE_FROM_ABI void __format_sub_seconds(basic_stringstream<_CharT>& __sstr, const chrono::zoned_time<_Duration, _TimeZonePtr>& __value) { __formatter::__format_sub_seconds(__sstr, __value.get_local_time().time_since_epoch()); } -# endif +# endif template <class _Tp> consteval bool __use_fraction() { if constexpr (__is_time_point<_Tp>) return chrono::hh_mm_ss<typename _Tp::duration>::fractional_width; -# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && \ - !defined(_LIBCPP_HAS_NO_FILESYSTEM) && !defined(_LIBCPP_HAS_NO_LOCALIZATION) +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && \ + !defined(_LIBCPP_HAS_NO_FILESYSTEM) else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) return chrono::hh_mm_ss<typename _Tp::duration>::fractional_width; -# endif +# endif else if constexpr (chrono::__is_duration<_Tp>::value) return chrono::hh_mm_ss<_Tp>::fractional_width; else if constexpr (__is_hh_mm_ss<_Tp>) @@ -225,16 +228,15 @@ struct _LIBCPP_HIDE_FROM_ABI __time_zone { template <class _Tp> _LIBCPP_HIDE_FROM_ABI __time_zone __convert_to_time_zone([[maybe_unused]] const _Tp& __value) { -# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) if constexpr (same_as<_Tp, chrono::sys_info>) return {__value.abbrev, __value.offset}; -# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ - !defined(_LIBCPP_HAS_NO_LOCALIZATION) +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) return __formatter::__convert_to_time_zone(__value.get_info()); -# endif +# endif else -# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) return {"UTC", chrono::seconds{0}}; } @@ -341,12 +343,12 @@ _LIBCPP_HIDE_FROM_ABI void __format_chrono_using_chrono_specs( // // TODO FMT evaluate the comment above. -# if defined(__GLIBC__) || defined(_AIX) || defined(_WIN32) +# if defined(__GLIBC__) || defined(_AIX) || defined(_WIN32) case _CharT('y'): // Glibc fails for negative values, AIX for positive values too. 
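// A worked instance of the fallback on the next line (illustrative years,
// not part of the committed hunk): for tm_year + 1900 == -53, the
// expression yields std::abs(-53) % 100 == 53, printed as "53"; for the
// year 7 it yields 7, which "{:02}" zero-pads to "07", supplying what a
// correct platform '%y' would have emitted.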
__sstr << std::format(_LIBCPP_STATICALLY_WIDEN(_CharT, "{:02}"), (std::abs(__t.tm_year + 1900)) % 100); break; -# endif // defined(__GLIBC__) || defined(_AIX) || defined(_WIN32) +# endif // defined(__GLIBC__) || defined(_AIX) || defined(_WIN32) case _CharT('Y'): // Depending on the platform's libc the range of supported years is @@ -442,17 +444,16 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool __weekday_ok(const _Tp& __value) { return __value.weekday().ok(); else if constexpr (__is_hh_mm_ss<_Tp>) return true; -# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else if constexpr (same_as<_Tp, chrono::sys_info>) return true; else if constexpr (same_as<_Tp, chrono::local_info>) return true; -# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ - !defined(_LIBCPP_HAS_NO_LOCALIZATION) +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) return true; -# endif -# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else static_assert(sizeof(_Tp) == 0, "Add the missing type specialization"); } @@ -493,17 +494,16 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool __weekday_name_ok(const _Tp& __value) { return __value.weekday().ok(); else if constexpr (__is_hh_mm_ss<_Tp>) return true; -# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else if constexpr (same_as<_Tp, chrono::sys_info>) return true; else if constexpr (same_as<_Tp, chrono::local_info>) return true; -# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ - !defined(_LIBCPP_HAS_NO_LOCALIZATION) +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) return true; -# endif -# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else static_assert(sizeof(_Tp) == 0, "Add the missing type specialization"); } @@ -544,17 +544,16 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool __date_ok(const _Tp& __value) { return __value.ok(); else if constexpr (__is_hh_mm_ss<_Tp>) return true; -# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else if constexpr (same_as<_Tp, chrono::sys_info>) return true; else if constexpr (same_as<_Tp, chrono::local_info>) return true; -# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ - !defined(_LIBCPP_HAS_NO_LOCALIZATION) +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) return true; -# endif -# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else static_assert(sizeof(_Tp) == 0, "Add the missing type specialization"); } @@ -595,17 +594,16 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool __month_name_ok(const _Tp& __value) { return __value.month().ok(); else if constexpr (__is_hh_mm_ss<_Tp>) return true; -# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else if constexpr (same_as<_Tp, chrono::sys_info>) return true; else if constexpr (same_as<_Tp, chrono::local_info>) return true; -# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && 
!defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ - !defined(_LIBCPP_HAS_NO_LOCALIZATION) +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) return true; -# endif -# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else static_assert(sizeof(_Tp) == 0, "Add the missing type specialization"); } @@ -943,7 +941,7 @@ public: } }; -# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) template <__fmt_char_type _CharT> struct formatter<chrono::sys_info, _CharT> : public __formatter_chrono<_CharT> { public: @@ -965,8 +963,7 @@ public: return _Base::__parse(__ctx, __format_spec::__fields_chrono, __format_spec::__flags{}); } }; -# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ - !defined(_LIBCPP_HAS_NO_LOCALIZATION) +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) // Note due to how libc++'s formatters are implemented there is no need to add // the exposition only local-time-format-t abstraction. template <class _Duration, class _TimeZonePtr, __fmt_char_type _CharT> @@ -979,12 +976,13 @@ public: return _Base::__parse(__ctx, __format_spec::__fields_chrono, __format_spec::__flags::__clock); } }; -# endif // !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && - // !defined(_LIBCPP_HAS_NO_LOCALIZATION) -# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# endif // !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) -#endif // if _LIBCPP_STD_VER >= 20 +# endif // if _LIBCPP_STD_VER >= 20 _LIBCPP_END_NAMESPACE_STD +#endif // !_LIBCPP_HAS_NO_LOCALIZATION + #endif // _LIBCPP___CHRONO_FORMATTER_H diff --git a/libcxx/include/__chrono/ostream.h b/libcxx/include/__chrono/ostream.h index e6c4325..196ebd5 100644 --- a/libcxx/include/__chrono/ostream.h +++ b/libcxx/include/__chrono/ostream.h @@ -10,37 +10,40 @@ #ifndef _LIBCPP___CHRONO_OSTREAM_H #define _LIBCPP___CHRONO_OSTREAM_H -#include <__chrono/calendar.h> -#include <__chrono/day.h> -#include <__chrono/duration.h> -#include <__chrono/file_clock.h> -#include <__chrono/hh_mm_ss.h> -#include <__chrono/local_info.h> -#include <__chrono/month.h> -#include <__chrono/month_weekday.h> -#include <__chrono/monthday.h> -#include <__chrono/statically_widen.h> -#include <__chrono/sys_info.h> -#include <__chrono/system_clock.h> -#include <__chrono/weekday.h> -#include <__chrono/year.h> -#include <__chrono/year_month.h> -#include <__chrono/year_month_day.h> -#include <__chrono/year_month_weekday.h> -#include <__chrono/zoned_time.h> -#include <__concepts/same_as.h> #include <__config> -#include <__format/format_functions.h> -#include <__fwd/ostream.h> -#include <ratio> -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif +#ifndef _LIBCPP_HAS_NO_LOCALIZATION + +# include <__chrono/calendar.h> +# include <__chrono/day.h> +# include <__chrono/duration.h> +# include <__chrono/file_clock.h> +# include <__chrono/hh_mm_ss.h> +# include <__chrono/local_info.h> +# include <__chrono/month.h> +# include <__chrono/month_weekday.h> +# include <__chrono/monthday.h> +# include <__chrono/statically_widen.h> +# include <__chrono/sys_info.h> +# include <__chrono/system_clock.h> +# include <__chrono/weekday.h> +# include <__chrono/year.h> +# include 
<__chrono/year_month.h> +# include <__chrono/year_month_day.h> +# include <__chrono/year_month_weekday.h> +# include <__chrono/zoned_time.h> +# include <__concepts/same_as.h> +# include <__format/format_functions.h> +# include <__fwd/ostream.h> +# include <ratio> + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif _LIBCPP_BEGIN_NAMESPACE_STD -#if _LIBCPP_STD_VER >= 20 +# if _LIBCPP_STD_VER >= 20 namespace chrono { @@ -82,11 +85,11 @@ _LIBCPP_HIDE_FROM_ABI auto __units_suffix() { else if constexpr (same_as<typename _Period::type, nano>) return _LIBCPP_STATICALLY_WIDEN(_CharT, "ns"); else if constexpr (same_as<typename _Period::type, micro>) -# ifndef _LIBCPP_HAS_NO_UNICODE +# ifndef _LIBCPP_HAS_NO_UNICODE return _LIBCPP_STATICALLY_WIDEN(_CharT, "\u00b5s"); -# else +# else return _LIBCPP_STATICALLY_WIDEN(_CharT, "us"); -# endif +# endif else if constexpr (same_as<typename _Period::type, milli>) return _LIBCPP_STATICALLY_WIDEN(_CharT, "ms"); else if constexpr (same_as<typename _Period::type, centi>) @@ -265,7 +268,7 @@ operator<<(basic_ostream<_CharT, _Traits>& __os, const hh_mm_ss<_Duration> __hms return __os << std::format(__os.getloc(), _LIBCPP_STATICALLY_WIDEN(_CharT, "{:L%T}"), __hms); } -# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) template <class _CharT, class _Traits> _LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>& @@ -303,20 +306,21 @@ operator<<(basic_ostream<_CharT, _Traits>& __os, const local_info& __info) { _LIBCPP_STATICALLY_WIDEN(_CharT, "{}: {{{}, {}}}"), __result(), __info.first, __info.second); } -# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ - !defined(_LIBCPP_HAS_NO_LOCALIZATION) +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) template <class _CharT, class _Traits, class _Duration, class _TimeZonePtr> _LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>& operator<<(basic_ostream<_CharT, _Traits>& __os, const zoned_time<_Duration, _TimeZonePtr>& __tp) { return __os << std::format(__os.getloc(), _LIBCPP_STATICALLY_WIDEN(_CharT, "{:L%F %T %Z}"), __tp); } -# endif -# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) } // namespace chrono -#endif // if _LIBCPP_STD_VER >= 20 +# endif // if _LIBCPP_STD_VER >= 20 _LIBCPP_END_NAMESPACE_STD +#endif // !_LIBCPP_HAS_NO_LOCALIZATION + #endif // _LIBCPP___CHRONO_OSTREAM_H diff --git a/libcxx/include/__chrono/parser_std_format_spec.h b/libcxx/include/__chrono/parser_std_format_spec.h index 6803d03..c9cfcc6 100644 --- a/libcxx/include/__chrono/parser_std_format_spec.h +++ b/libcxx/include/__chrono/parser_std_format_spec.h @@ -11,20 +11,23 @@ #define _LIBCPP___CHRONO_PARSER_STD_FORMAT_SPEC_H #include <__config> -#include <__format/concepts.h> -#include <__format/format_error.h> -#include <__format/format_parse_context.h> -#include <__format/formatter_string.h> -#include <__format/parser_std_format_spec.h> -#include <string_view> -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif +#if !defined(_LIBCPP_HAS_NO_LOCALIZATION) + +# include <__format/concepts.h> +# include <__format/format_error.h> +# include <__format/format_parse_context.h> +# include <__format/formatter_string.h> +# include <__format/parser_std_format_spec.h> +# include <string_view> + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif 
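Each of these chrono header hunks applies the same mechanical recipe; schematically (a sketch of the shared pattern, not any one verbatim header):

// <__chrono/xxx.h> after this change (schematic):
#include <__config>

#ifndef _LIBCPP_HAS_NO_LOCALIZATION

// every other include moves under the guard, and nested directives gain
// one level of indentation after the '#':
# include <__format/format_functions.h>

# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#  pragma GCC system_header
# endif

_LIBCPP_BEGIN_NAMESPACE_STD
# if _LIBCPP_STD_VER >= 20
//   ... the header's actual content, textually unchanged ...
# endif // _LIBCPP_STD_VER >= 20
_LIBCPP_END_NAMESPACE_STD

#endif // !_LIBCPP_HAS_NO_LOCALIZATION

The payoff is that a localization-free build no longer parses the chrono formatting machinery at all, which is also why the hunks above can drop the !defined(_LIBCPP_HAS_NO_LOCALIZATION) conjunct from the inner tzdb conditionals.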
_LIBCPP_BEGIN_NAMESPACE_STD -#if _LIBCPP_STD_VER >= 20 +# if _LIBCPP_STD_VER >= 20 namespace __format_spec { @@ -409,8 +412,10 @@ private: } // namespace __format_spec -#endif // _LIBCPP_STD_VER >= 20 +# endif // _LIBCPP_STD_VER >= 20 _LIBCPP_END_NAMESPACE_STD +#endif // !defined(_LIBCPP_HAS_NO_LOCALIZATION) + #endif // _LIBCPP___CHRONO_PARSER_STD_FORMAT_SPEC_H diff --git a/libcxx/include/__compare/ordering.h b/libcxx/include/__compare/ordering.h index 379f345..297218e 100644 --- a/libcxx/include/__compare/ordering.h +++ b/libcxx/include/__compare/ordering.h @@ -24,7 +24,12 @@ _LIBCPP_BEGIN_NAMESPACE_STD // exposition only enum class _OrdResult : signed char { __less = -1, __equiv = 0, __greater = 1 }; -enum class _NCmpResult : signed char { __unordered = -127 }; +enum class _PartialOrdResult : signed char { + __less = static_cast<signed char>(_OrdResult::__less), + __equiv = static_cast<signed char>(_OrdResult::__equiv), + __greater = static_cast<signed char>(_OrdResult::__greater), + __unordered = -127, +}; class partial_ordering; class weak_ordering; @@ -47,15 +52,7 @@ struct _CmpUnspecifiedParam { }; class partial_ordering { - using _ValueT = signed char; - - _LIBCPP_HIDE_FROM_ABI explicit constexpr partial_ordering(_OrdResult __v) noexcept : __value_(_ValueT(__v)) {} - - _LIBCPP_HIDE_FROM_ABI explicit constexpr partial_ordering(_NCmpResult __v) noexcept : __value_(_ValueT(__v)) {} - - _LIBCPP_HIDE_FROM_ABI constexpr bool __is_ordered() const noexcept { - return __value_ != _ValueT(_NCmpResult::__unordered); - } + _LIBCPP_HIDE_FROM_ABI explicit constexpr partial_ordering(_PartialOrdResult __v) noexcept : __value_(__v) {} public: // valid values @@ -68,39 +65,39 @@ public: _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator==(partial_ordering, partial_ordering) noexcept = default; _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator==(partial_ordering __v, _CmpUnspecifiedParam) noexcept { - return __v.__is_ordered() && __v.__value_ == 0; + return __v.__value_ == _PartialOrdResult::__equiv; } _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator<(partial_ordering __v, _CmpUnspecifiedParam) noexcept { - return __v.__is_ordered() && __v.__value_ < 0; + return __v.__value_ == _PartialOrdResult::__less; } _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator<=(partial_ordering __v, _CmpUnspecifiedParam) noexcept { - return __v.__is_ordered() && __v.__value_ <= 0; + return __v.__value_ == _PartialOrdResult::__equiv || __v.__value_ == _PartialOrdResult::__less; } _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator>(partial_ordering __v, _CmpUnspecifiedParam) noexcept { - return __v.__is_ordered() && __v.__value_ > 0; + return __v.__value_ == _PartialOrdResult::__greater; } _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator>=(partial_ordering __v, _CmpUnspecifiedParam) noexcept { - return __v.__is_ordered() && __v.__value_ >= 0; + return __v.__value_ == _PartialOrdResult::__equiv || __v.__value_ == _PartialOrdResult::__greater; } _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator<(_CmpUnspecifiedParam, partial_ordering __v) noexcept { - return __v.__is_ordered() && 0 < __v.__value_; + return __v.__value_ == _PartialOrdResult::__greater; } _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator<=(_CmpUnspecifiedParam, partial_ordering __v) noexcept { - return __v.__is_ordered() && 0 <= __v.__value_; + return __v.__value_ == _PartialOrdResult::__equiv || __v.__value_ == _PartialOrdResult::__greater; } _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator>(_CmpUnspecifiedParam, partial_ordering 
__v) noexcept { - return __v.__is_ordered() && 0 > __v.__value_; + return __v.__value_ == _PartialOrdResult::__less; } _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator>=(_CmpUnspecifiedParam, partial_ordering __v) noexcept { - return __v.__is_ordered() && 0 >= __v.__value_; + return __v.__value_ == _PartialOrdResult::__equiv || __v.__value_ == _PartialOrdResult::__less; } _LIBCPP_HIDE_FROM_ABI friend constexpr partial_ordering @@ -114,13 +111,13 @@ public: } private: - _ValueT __value_; + _PartialOrdResult __value_; }; -inline constexpr partial_ordering partial_ordering::less(_OrdResult::__less); -inline constexpr partial_ordering partial_ordering::equivalent(_OrdResult::__equiv); -inline constexpr partial_ordering partial_ordering::greater(_OrdResult::__greater); -inline constexpr partial_ordering partial_ordering::unordered(_NCmpResult ::__unordered); +inline constexpr partial_ordering partial_ordering::less(_PartialOrdResult::__less); +inline constexpr partial_ordering partial_ordering::equivalent(_PartialOrdResult::__equiv); +inline constexpr partial_ordering partial_ordering::greater(_PartialOrdResult::__greater); +inline constexpr partial_ordering partial_ordering::unordered(_PartialOrdResult::__unordered); class weak_ordering { using _ValueT = signed char; diff --git a/libcxx/include/__config b/libcxx/include/__config index 9f3bab3..ff5e3cf5 100644 --- a/libcxx/include/__config +++ b/libcxx/include/__config @@ -31,6 +31,7 @@ # define _LIBCPP_CONCAT_IMPL(_X, _Y) _X##_Y # define _LIBCPP_CONCAT(_X, _Y) _LIBCPP_CONCAT_IMPL(_X, _Y) +# define _LIBCPP_CONCAT3(X, Y, Z) _LIBCPP_CONCAT(X, _LIBCPP_CONCAT(Y, Z)) # if __STDC_HOSTED__ == 0 # define _LIBCPP_FREESTANDING @@ -191,25 +192,6 @@ _LIBCPP_HARDENING_MODE_DEBUG # error "libc++ only supports C++03 with Clang-based compilers. Please enable C++11" # endif -// FIXME: ABI detection should be done via compiler builtin macros. This -// is just a placeholder until Clang implements such macros. For now assume -// that Windows compilers pretending to be MSVC++ target the Microsoft ABI, -// and allow the user to explicitly specify the ABI to handle cases where this -// heuristic falls short. -# if defined(_LIBCPP_ABI_FORCE_ITANIUM) && defined(_LIBCPP_ABI_FORCE_MICROSOFT) -# error "Only one of _LIBCPP_ABI_FORCE_ITANIUM and _LIBCPP_ABI_FORCE_MICROSOFT can be defined" -# elif defined(_LIBCPP_ABI_FORCE_ITANIUM) -# define _LIBCPP_ABI_ITANIUM -# elif defined(_LIBCPP_ABI_FORCE_MICROSOFT) -# define _LIBCPP_ABI_MICROSOFT -# else -# if defined(_WIN32) && defined(_MSC_VER) -# define _LIBCPP_ABI_MICROSOFT -# else -# define _LIBCPP_ABI_ITANIUM -# endif -# endif - # if defined(_LIBCPP_ABI_MICROSOFT) && !defined(_LIBCPP_NO_VCRUNTIME) # define _LIBCPP_ABI_VCRUNTIME # endif diff --git a/libcxx/include/__configuration/abi.h b/libcxx/include/__configuration/abi.h index 8efbb42..707e10b 100644 --- a/libcxx/include/__configuration/abi.h +++ b/libcxx/include/__configuration/abi.h @@ -18,6 +18,25 @@ # pragma GCC system_header #endif +// FIXME: ABI detection should be done via compiler builtin macros. This +// is just a placeholder until Clang implements such macros. For now assume +// that Windows compilers pretending to be MSVC++ target the Microsoft ABI, +// and allow the user to explicitly specify the ABI to handle cases where this +// heuristic falls short. 
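// For instance, a build whose compiler defines _MSC_VER but that wants the
// Itanium ABI can pass -D_LIBCPP_ABI_FORCE_ITANIUM to override the
// heuristic below, and -D_LIBCPP_ABI_FORCE_MICROSOFT does the reverse;
// defining both at once is rejected by the #error that follows.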
+#if defined(_LIBCPP_ABI_FORCE_ITANIUM) && defined(_LIBCPP_ABI_FORCE_MICROSOFT) +# error "Only one of _LIBCPP_ABI_FORCE_ITANIUM and _LIBCPP_ABI_FORCE_MICROSOFT can be defined" +#elif defined(_LIBCPP_ABI_FORCE_ITANIUM) +# define _LIBCPP_ABI_ITANIUM +#elif defined(_LIBCPP_ABI_FORCE_MICROSOFT) +# define _LIBCPP_ABI_MICROSOFT +#else +# if defined(_WIN32) && defined(_MSC_VER) +# define _LIBCPP_ABI_MICROSOFT +# else +# define _LIBCPP_ABI_ITANIUM +# endif +#endif + #if _LIBCPP_ABI_VERSION >= 2 // Change short string representation so that string data starts at offset 0, // improving its alignment in some cases. @@ -98,6 +117,13 @@ // and WCHAR_MAX. This ABI setting determines whether we should instead track whether the fill // value has been initialized using a separate boolean, which changes the ABI. # define _LIBCPP_ABI_IOS_ALLOW_ARBITRARY_FILL_VALUE +// Historically, libc++ used a type called `__compressed_pair` to reduce storage needs in cases of empty types (e.g. an +// empty allocator in std::vector). We switched to using `[[no_unique_address]]`. However, for ABI compatibility reasons +// we had to add artificial padding in a few places. +// +// This setting disables the addition of such artificial padding, leading to a more optimal +// representation for several types. +# define _LIBCPP_ABI_NO_COMPRESSED_PAIR_PADDING #elif _LIBCPP_ABI_VERSION == 1 # if !(defined(_LIBCPP_OBJECT_FORMAT_COFF) || defined(_LIBCPP_OBJECT_FORMAT_XCOFF)) // Enable compiling copies of now inline methods into the dylib to support @@ -150,6 +176,11 @@ // ABI impact: changes the iterator type of `vector` (except `vector<bool>`). // #define _LIBCPP_ABI_BOUNDED_ITERATORS_IN_VECTOR +// [[msvc::no_unique_address]] seems to mostly affect empty classes, so the padding scheme for Itanium doesn't work. +#if defined(_LIBCPP_ABI_MICROSOFT) && !defined(_LIBCPP_ABI_NO_COMPRESSED_PAIR_PADDING) +# define _LIBCPP_ABI_NO_COMPRESSED_PAIR_PADDING +#endif + #if defined(_LIBCPP_COMPILER_CLANG_BASED) # if defined(__APPLE__) # if defined(__i386__) || defined(__x86_64__) diff --git a/libcxx/include/__functional/function.h b/libcxx/include/__functional/function.h index ff31011..0d95c9a 100644 --- a/libcxx/include/__functional/function.h +++ b/libcxx/include/__functional/function.h @@ -143,45 +143,46 @@ class __default_alloc_func; template <class _Fp, class _Ap, class _Rp, class... _ArgTypes> class __alloc_func<_Fp, _Ap, _Rp(_ArgTypes...)> { - __compressed_pair<_Fp, _Ap> __f_; + _LIBCPP_COMPRESSED_PAIR(_Fp, __func_, _Ap, __alloc_); public: typedef _LIBCPP_NODEBUG _Fp _Target; typedef _LIBCPP_NODEBUG _Ap _Alloc; - _LIBCPP_HIDE_FROM_ABI const _Target& __target() const { return __f_.first(); } + _LIBCPP_HIDE_FROM_ABI const _Target& __target() const { return __func_; } // WIN32 APIs may define __allocator, so use __get_allocator instead. 
- _LIBCPP_HIDE_FROM_ABI const _Alloc& __get_allocator() const { return __f_.second(); } + _LIBCPP_HIDE_FROM_ABI const _Alloc& __get_allocator() const { return __alloc_; } - _LIBCPP_HIDE_FROM_ABI explicit __alloc_func(_Target&& __f) - : __f_(piecewise_construct, std::forward_as_tuple(std::move(__f)), std::forward_as_tuple()) {} + _LIBCPP_HIDE_FROM_ABI explicit __alloc_func(_Target&& __f) : __func_(std::move(__f)), __alloc_() {} - _LIBCPP_HIDE_FROM_ABI explicit __alloc_func(const _Target& __f, const _Alloc& __a) - : __f_(piecewise_construct, std::forward_as_tuple(__f), std::forward_as_tuple(__a)) {} + _LIBCPP_HIDE_FROM_ABI explicit __alloc_func(const _Target& __f, const _Alloc& __a) : __func_(__f), __alloc_(__a) {} _LIBCPP_HIDE_FROM_ABI explicit __alloc_func(const _Target& __f, _Alloc&& __a) - : __f_(piecewise_construct, std::forward_as_tuple(__f), std::forward_as_tuple(std::move(__a))) {} + : __func_(__f), __alloc_(std::move(__a)) {} _LIBCPP_HIDE_FROM_ABI explicit __alloc_func(_Target&& __f, _Alloc&& __a) - : __f_(piecewise_construct, std::forward_as_tuple(std::move(__f)), std::forward_as_tuple(std::move(__a))) {} + : __func_(std::move(__f)), __alloc_(std::move(__a)) {} _LIBCPP_HIDE_FROM_ABI _Rp operator()(_ArgTypes&&... __arg) { typedef __invoke_void_return_wrapper<_Rp> _Invoker; - return _Invoker::__call(__f_.first(), std::forward<_ArgTypes>(__arg)...); + return _Invoker::__call(__func_, std::forward<_ArgTypes>(__arg)...); } _LIBCPP_HIDE_FROM_ABI __alloc_func* __clone() const { typedef allocator_traits<_Alloc> __alloc_traits; typedef __rebind_alloc<__alloc_traits, __alloc_func> _AA; - _AA __a(__f_.second()); + _AA __a(__alloc_); typedef __allocator_destructor<_AA> _Dp; unique_ptr<__alloc_func, _Dp> __hold(__a.allocate(1), _Dp(__a, 1)); - ::new ((void*)__hold.get()) __alloc_func(__f_.first(), _Alloc(__a)); + ::new ((void*)__hold.get()) __alloc_func(__func_, _Alloc(__a)); return __hold.release(); } - _LIBCPP_HIDE_FROM_ABI void destroy() _NOEXCEPT { __f_.~__compressed_pair<_Target, _Alloc>(); } + _LIBCPP_HIDE_FROM_ABI void destroy() _NOEXCEPT { + __func_.~_Fp(); + __alloc_.~_Alloc(); + } _LIBCPP_HIDE_FROM_ABI static void __destroy_and_delete(__alloc_func* __f) { typedef allocator_traits<_Alloc> __alloc_traits; diff --git a/libcxx/include/__hash_table b/libcxx/include/__hash_table index d5fbc92..be0d65d 100644 --- a/libcxx/include/__hash_table +++ b/libcxx/include/__hash_table @@ -77,11 +77,18 @@ struct __hash_node_base { typedef __hash_node_base __first_node; typedef __rebind_pointer_t<_NodePtr, __first_node> __node_base_pointer; typedef _NodePtr __node_pointer; - -#if defined(_LIBCPP_ABI_FIX_UNORDERED_NODE_POINTER_UB) typedef __node_base_pointer __next_pointer; -#else - typedef __conditional_t<is_pointer<__node_pointer>::value, __node_base_pointer, __node_pointer> __next_pointer; + +// TODO(LLVM 22): Remove this check +#ifndef _LIBCPP_ABI_FIX_UNORDERED_NODE_POINTER_UB + static_assert(sizeof(__node_base_pointer) == sizeof(__node_pointer) && _LIBCPP_ALIGNOF(__node_base_pointer) == + _LIBCPP_ALIGNOF(__node_pointer), + "It looks like you are using std::__hash_table (an implementation detail for the unordered containers) " + "with a fancy pointer type that has a different representation depending on whether it points to a " + "__hash_table base pointer or a __hash_table node pointer (both of which are implementation details of " + "the standard library). This means that your ABI is being broken between LLVM 19 and LLVM 20.
If you " + "don't care about your ABI being broken, define the _LIBCPP_ABI_TREE_REMOVE_NODE_POINTER_UB macro to " + "silence this diagnostic."); #endif __next_pointer __next_; @@ -554,29 +561,29 @@ class __bucket_list_deallocator { typedef allocator_traits<allocator_type> __alloc_traits; typedef typename __alloc_traits::size_type size_type; - __compressed_pair<size_type, allocator_type> __data_; + _LIBCPP_COMPRESSED_PAIR(size_type, __size_, allocator_type, __alloc_); public: typedef typename __alloc_traits::pointer pointer; _LIBCPP_HIDE_FROM_ABI __bucket_list_deallocator() _NOEXCEPT_(is_nothrow_default_constructible<allocator_type>::value) - : __data_(0, __default_init_tag()) {} + : __size_(0) {} _LIBCPP_HIDE_FROM_ABI __bucket_list_deallocator(const allocator_type& __a, size_type __size) _NOEXCEPT_(is_nothrow_copy_constructible<allocator_type>::value) - : __data_(__size, __a) {} + : __size_(__size), __alloc_(__a) {} _LIBCPP_HIDE_FROM_ABI __bucket_list_deallocator(__bucket_list_deallocator&& __x) _NOEXCEPT_(is_nothrow_move_constructible<allocator_type>::value) - : __data_(std::move(__x.__data_)) { + : __size_(std::move(__x.__size_)), __alloc_(std::move(__x.__alloc_)) { __x.size() = 0; } - _LIBCPP_HIDE_FROM_ABI size_type& size() _NOEXCEPT { return __data_.first(); } - _LIBCPP_HIDE_FROM_ABI size_type size() const _NOEXCEPT { return __data_.first(); } + _LIBCPP_HIDE_FROM_ABI size_type& size() _NOEXCEPT { return __size_; } + _LIBCPP_HIDE_FROM_ABI size_type size() const _NOEXCEPT { return __size_; } - _LIBCPP_HIDE_FROM_ABI allocator_type& __alloc() _NOEXCEPT { return __data_.second(); } - _LIBCPP_HIDE_FROM_ABI const allocator_type& __alloc() const _NOEXCEPT { return __data_.second(); } + _LIBCPP_HIDE_FROM_ABI allocator_type& __alloc() _NOEXCEPT { return __alloc_; } + _LIBCPP_HIDE_FROM_ABI const allocator_type& __alloc() const _NOEXCEPT { return __alloc_; } _LIBCPP_HIDE_FROM_ABI void operator()(pointer __p) _NOEXCEPT { __alloc_traits::deallocate(__alloc(), __p, size()); } }; @@ -716,27 +723,27 @@ private: // --- Member data begin --- __bucket_list __bucket_list_; - __compressed_pair<__first_node, __node_allocator> __p1_; - __compressed_pair<size_type, hasher> __p2_; - __compressed_pair<float, key_equal> __p3_; + _LIBCPP_COMPRESSED_PAIR(__first_node, __first_node_, __node_allocator, __node_alloc_); + _LIBCPP_COMPRESSED_PAIR(size_type, __size_, hasher, __hasher_); + _LIBCPP_COMPRESSED_PAIR(float, __max_load_factor_, key_equal, __key_eq_); // --- Member data end --- - _LIBCPP_HIDE_FROM_ABI size_type& size() _NOEXCEPT { return __p2_.first(); } + _LIBCPP_HIDE_FROM_ABI size_type& size() _NOEXCEPT { return __size_; } public: - _LIBCPP_HIDE_FROM_ABI size_type size() const _NOEXCEPT { return __p2_.first(); } + _LIBCPP_HIDE_FROM_ABI size_type size() const _NOEXCEPT { return __size_; } - _LIBCPP_HIDE_FROM_ABI hasher& hash_function() _NOEXCEPT { return __p2_.second(); } - _LIBCPP_HIDE_FROM_ABI const hasher& hash_function() const _NOEXCEPT { return __p2_.second(); } + _LIBCPP_HIDE_FROM_ABI hasher& hash_function() _NOEXCEPT { return __hasher_; } + _LIBCPP_HIDE_FROM_ABI const hasher& hash_function() const _NOEXCEPT { return __hasher_; } - _LIBCPP_HIDE_FROM_ABI float& max_load_factor() _NOEXCEPT { return __p3_.first(); } - _LIBCPP_HIDE_FROM_ABI float max_load_factor() const _NOEXCEPT { return __p3_.first(); } + _LIBCPP_HIDE_FROM_ABI float& max_load_factor() _NOEXCEPT { return __max_load_factor_; } + _LIBCPP_HIDE_FROM_ABI float max_load_factor() const _NOEXCEPT { return __max_load_factor_; } - 
_LIBCPP_HIDE_FROM_ABI key_equal& key_eq() _NOEXCEPT { return __p3_.second(); } - _LIBCPP_HIDE_FROM_ABI const key_equal& key_eq() const _NOEXCEPT { return __p3_.second(); } + _LIBCPP_HIDE_FROM_ABI key_equal& key_eq() _NOEXCEPT { return __key_eq_; } + _LIBCPP_HIDE_FROM_ABI const key_equal& key_eq() const _NOEXCEPT { return __key_eq_; } - _LIBCPP_HIDE_FROM_ABI __node_allocator& __node_alloc() _NOEXCEPT { return __p1_.second(); } - _LIBCPP_HIDE_FROM_ABI const __node_allocator& __node_alloc() const _NOEXCEPT { return __p1_.second(); } + _LIBCPP_HIDE_FROM_ABI __node_allocator& __node_alloc() _NOEXCEPT { return __node_alloc_; } + _LIBCPP_HIDE_FROM_ABI const __node_allocator& __node_alloc() const _NOEXCEPT { return __node_alloc_; } public: typedef __hash_iterator<__node_pointer> iterator; @@ -1022,26 +1029,34 @@ inline __hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table() _NOEXCEPT_( is_nothrow_default_constructible<__bucket_list>::value&& is_nothrow_default_constructible<__first_node>::value&& is_nothrow_default_constructible<__node_allocator>::value&& is_nothrow_default_constructible<hasher>::value&& is_nothrow_default_constructible<key_equal>::value) - : __p2_(0, __default_init_tag()), __p3_(1.0f, __default_init_tag()) {} + : __size_(0), __max_load_factor_(1.0f) {} template <class _Tp, class _Hash, class _Equal, class _Alloc> inline __hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(const hasher& __hf, const key_equal& __eql) - : __bucket_list_(nullptr, __bucket_list_deleter()), __p1_(), __p2_(0, __hf), __p3_(1.0f, __eql) {} + : __bucket_list_(nullptr, __bucket_list_deleter()), + __first_node_(), + __node_alloc_(), + __size_(0), + __hasher_(__hf), + __max_load_factor_(1.0f), + __key_eq_(__eql) {} template <class _Tp, class _Hash, class _Equal, class _Alloc> __hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table( const hasher& __hf, const key_equal& __eql, const allocator_type& __a) : __bucket_list_(nullptr, __bucket_list_deleter(__pointer_allocator(__a), 0)), - __p1_(__default_init_tag(), __node_allocator(__a)), - __p2_(0, __hf), - __p3_(1.0f, __eql) {} + __node_alloc_(__node_allocator(__a)), + __size_(0), + __hasher_(__hf), + __max_load_factor_(1.0f), + __key_eq_(__eql) {} template <class _Tp, class _Hash, class _Equal, class _Alloc> __hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(const allocator_type& __a) : __bucket_list_(nullptr, __bucket_list_deleter(__pointer_allocator(__a), 0)), - __p1_(__default_init_tag(), __node_allocator(__a)), - __p2_(0, __default_init_tag()), - __p3_(1.0f, __default_init_tag()) {} + __node_alloc_(__node_allocator(__a)), + __size_(0), + __max_load_factor_(1.0f) {} template <class _Tp, class _Hash, class _Equal, class _Alloc> __hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(const __hash_table& __u) @@ -1049,17 +1064,20 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(const __hash_table& __u) __bucket_list_deleter(allocator_traits<__pointer_allocator>::select_on_container_copy_construction( __u.__bucket_list_.get_deleter().__alloc()), 0)), - __p1_(__default_init_tag(), - allocator_traits<__node_allocator>::select_on_container_copy_construction(__u.__node_alloc())), - __p2_(0, __u.hash_function()), - __p3_(__u.__p3_) {} + __node_alloc_(allocator_traits<__node_allocator>::select_on_container_copy_construction(__u.__node_alloc())), + __size_(0), + __hasher_(__u.hash_function()), + __max_load_factor_(__u.__max_load_factor_), + __key_eq_(__u.__key_eq_) {} template <class _Tp, class _Hash, class _Equal, class _Alloc> __hash_table<_Tp, _Hash, 
_Equal, _Alloc>::__hash_table(const __hash_table& __u, const allocator_type& __a) : __bucket_list_(nullptr, __bucket_list_deleter(__pointer_allocator(__a), 0)), - __p1_(__default_init_tag(), __node_allocator(__a)), - __p2_(0, __u.hash_function()), - __p3_(__u.__p3_) {} + __node_alloc_(__node_allocator(__a)), + __size_(0), + __hasher_(__u.hash_function()), + __max_load_factor_(__u.__max_load_factor_), + __key_eq_(__u.__key_eq_) {} template <class _Tp, class _Hash, class _Equal, class _Alloc> __hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(__hash_table&& __u) _NOEXCEPT_( @@ -1067,12 +1085,15 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(__hash_table&& __u) _NOEX is_nothrow_move_constructible<__node_allocator>::value&& is_nothrow_move_constructible<hasher>::value&& is_nothrow_move_constructible<key_equal>::value) : __bucket_list_(std::move(__u.__bucket_list_)), - __p1_(std::move(__u.__p1_)), - __p2_(std::move(__u.__p2_)), - __p3_(std::move(__u.__p3_)) { + __first_node_(std::move(__u.__first_node_)), + __node_alloc_(std::move(__u.__node_alloc_)), + __size_(std::move(__u.__size_)), + __hasher_(std::move(__u.__hasher_)), + __max_load_factor_(__u.__max_load_factor_), + __key_eq_(std::move(__u.__key_eq_)) { if (size() > 0) { - __bucket_list_[std::__constrain_hash(__p1_.first().__next_->__hash(), bucket_count())] = __p1_.first().__ptr(); - __u.__p1_.first().__next_ = nullptr; + __bucket_list_[std::__constrain_hash(__first_node_.__next_->__hash(), bucket_count())] = __first_node_.__ptr(); + __u.__first_node_.__next_ = nullptr; __u.size() = 0; } } @@ -1080,17 +1101,19 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(__hash_table&& __u) _NOEX template <class _Tp, class _Hash, class _Equal, class _Alloc> __hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(__hash_table&& __u, const allocator_type& __a) : __bucket_list_(nullptr, __bucket_list_deleter(__pointer_allocator(__a), 0)), - __p1_(__default_init_tag(), __node_allocator(__a)), - __p2_(0, std::move(__u.hash_function())), - __p3_(std::move(__u.__p3_)) { + __node_alloc_(__node_allocator(__a)), + __size_(0), + __hasher_(std::move(__u.__hasher_)), + __max_load_factor_(__u.__max_load_factor_), + __key_eq_(std::move(__u.__key_eq_)) { if (__a == allocator_type(__u.__node_alloc())) { __bucket_list_.reset(__u.__bucket_list_.release()); __bucket_list_.get_deleter().size() = __u.__bucket_list_.get_deleter().size(); __u.__bucket_list_.get_deleter().size() = 0; if (__u.size() > 0) { - __p1_.first().__next_ = __u.__p1_.first().__next_; - __u.__p1_.first().__next_ = nullptr; - __bucket_list_[std::__constrain_hash(__p1_.first().__next_->__hash(), bucket_count())] = __p1_.first().__ptr(); + __first_node_.__next_ = __u.__first_node_.__next_; + __u.__first_node_.__next_ = nullptr; + __bucket_list_[std::__constrain_hash(__first_node_.__next_->__hash(), bucket_count())] = __first_node_.__ptr(); size() = __u.size(); __u.size() = 0; } @@ -1104,7 +1127,7 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::~__hash_table() { static_assert(is_copy_constructible<hasher>::value, "Hasher must be copy-constructible."); #endif - __deallocate_node(__p1_.first().__next_); + __deallocate_node(__first_node_.__next_); } template <class _Tp, class _Hash, class _Equal, class _Alloc> @@ -1150,8 +1173,8 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::__detach() _NOEXCEPT { for (size_type __i = 0; __i < __bc; ++__i) __bucket_list_[__i] = nullptr; size() = 0; - __next_pointer __cache = __p1_.first().__next_; - __p1_.first().__next_ = nullptr; + __next_pointer __cache = 
__first_node_.__next_; + __first_node_.__next_ = nullptr; return __cache; } @@ -1168,10 +1191,10 @@ void __hash_table<_Tp, _Hash, _Equal, _Alloc>::__move_assign(__hash_table& __u, hash_function() = std::move(__u.hash_function()); max_load_factor() = __u.max_load_factor(); key_eq() = std::move(__u.key_eq()); - __p1_.first().__next_ = __u.__p1_.first().__next_; + __first_node_.__next_ = __u.__first_node_.__next_; if (size() > 0) { - __bucket_list_[std::__constrain_hash(__p1_.first().__next_->__hash(), bucket_count())] = __p1_.first().__ptr(); - __u.__p1_.first().__next_ = nullptr; + __bucket_list_[std::__constrain_hash(__first_node_.__next_->__hash(), bucket_count())] = __first_node_.__ptr(); + __u.__first_node_.__next_ = nullptr; __u.size() = 0; } } @@ -1288,7 +1311,7 @@ void __hash_table<_Tp, _Hash, _Equal, _Alloc>::__assign_multi(_InputIterator __f template <class _Tp, class _Hash, class _Equal, class _Alloc> inline typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator __hash_table<_Tp, _Hash, _Equal, _Alloc>::begin() _NOEXCEPT { - return iterator(__p1_.first().__next_); + return iterator(__first_node_.__next_); } template <class _Tp, class _Hash, class _Equal, class _Alloc> @@ -1300,7 +1323,7 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::end() _NOEXCEPT { template <class _Tp, class _Hash, class _Equal, class _Alloc> inline typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::const_iterator __hash_table<_Tp, _Hash, _Equal, _Alloc>::begin() const _NOEXCEPT { - return const_iterator(__p1_.first().__next_); + return const_iterator(__first_node_.__next_); } template <class _Tp, class _Hash, class _Equal, class _Alloc> @@ -1312,8 +1335,8 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::end() const _NOEXCEPT { template <class _Tp, class _Hash, class _Equal, class _Alloc> void __hash_table<_Tp, _Hash, _Equal, _Alloc>::clear() _NOEXCEPT { if (size() > 0) { - __deallocate_node(__p1_.first().__next_); - __p1_.first().__next_ = nullptr; + __deallocate_node(__first_node_.__next_); + __first_node_.__next_ = nullptr; size_type __bc = bucket_count(); for (size_type __i = 0; __i < __bc; ++__i) __bucket_list_[__i] = nullptr; @@ -1365,7 +1388,7 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_insert_unique_perform(__node_po // insert_after __bucket_list_[__chash], or __first_node if bucket is null __next_pointer __pn = __bucket_list_[__chash]; if (__pn == nullptr) { - __pn = __p1_.first().__ptr(); + __pn = __first_node_.__ptr(); __nd->__next_ = __pn->__next_; __pn->__next_ = __nd->__ptr(); // fix up __bucket_list_ @@ -1445,7 +1468,7 @@ void __hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_insert_multi_perform( size_type __bc = bucket_count(); size_t __chash = std::__constrain_hash(__cp->__hash_, __bc); if (__pn == nullptr) { - __pn = __p1_.first().__ptr(); + __pn = __first_node_.__ptr(); __cp->__next_ = __pn->__next_; __pn->__next_ = __cp->__ptr(); // fix up __bucket_list_ @@ -1530,7 +1553,7 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::__emplace_unique_key_args(_Key const& // insert_after __bucket_list_[__chash], or __first_node if bucket is null __next_pointer __pn = __bucket_list_[__chash]; if (__pn == nullptr) { - __pn = __p1_.first().__ptr(); + __pn = __first_node_.__ptr(); __h->__next_ = __pn->__next_; __pn->__next_ = __h.get()->__ptr(); // fix up __bucket_list_ @@ -1708,7 +1731,7 @@ void __hash_table<_Tp, _Hash, _Equal, _Alloc>::__do_rehash(size_type __nbc) { if (__nbc > 0) { for (size_type __i = 0; __i < __nbc; ++__i) __bucket_list_[__i] = nullptr; - __next_pointer __pp = __p1_.first().__ptr(); + 
__next_pointer __pp = __first_node_.__ptr(); __next_pointer __cp = __pp->__next_; if (__cp != nullptr) { size_type __chash = std::__constrain_hash(__cp->__hash(), __nbc); @@ -1885,7 +1908,7 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::remove(const_iterator __p) _NOEXCEPT { // Fix up __bucket_list_ // if __pn is not in same bucket (before begin is not in same bucket) && // if __cn->__next_ is not in same bucket (nullptr is not in same bucket) - if (__pn == __p1_.first().__ptr() || std::__constrain_hash(__pn->__hash(), __bc) != __chash) { + if (__pn == __first_node_.__ptr() || std::__constrain_hash(__pn->__hash(), __bc) != __chash) { if (__cn->__next_ == nullptr || std::__constrain_hash(__cn->__next_->__hash(), __bc) != __chash) __bucket_list_[__chash] = nullptr; } @@ -2004,14 +2027,17 @@ void __hash_table<_Tp, _Hash, _Equal, _Alloc>::swap(__hash_table& __u) std::swap(__bucket_list_.get_deleter().size(), __u.__bucket_list_.get_deleter().size()); std::__swap_allocator(__bucket_list_.get_deleter().__alloc(), __u.__bucket_list_.get_deleter().__alloc()); std::__swap_allocator(__node_alloc(), __u.__node_alloc()); - std::swap(__p1_.first().__next_, __u.__p1_.first().__next_); - __p2_.swap(__u.__p2_); - __p3_.swap(__u.__p3_); + std::swap(__first_node_.__next_, __u.__first_node_.__next_); + using std::swap; + swap(__size_, __u.__size_); + swap(__hasher_, __u.__hasher_); + swap(__max_load_factor_, __u.__max_load_factor_); + swap(__key_eq_, __u.__key_eq_); if (size() > 0) - __bucket_list_[std::__constrain_hash(__p1_.first().__next_->__hash(), bucket_count())] = __p1_.first().__ptr(); + __bucket_list_[std::__constrain_hash(__first_node_.__next_->__hash(), bucket_count())] = __first_node_.__ptr(); if (__u.size() > 0) - __u.__bucket_list_[std::__constrain_hash(__u.__p1_.first().__next_->__hash(), __u.bucket_count())] = - __u.__p1_.first().__ptr(); + __u.__bucket_list_[std::__constrain_hash(__u.__first_node_.__next_->__hash(), __u.bucket_count())] = + __u.__first_node_.__ptr(); } template <class _Tp, class _Hash, class _Equal, class _Alloc> diff --git a/libcxx/include/__memory/compressed_pair.h b/libcxx/include/__memory/compressed_pair.h index 40e5cfc..629e3ad 100644 --- a/libcxx/include/__memory/compressed_pair.h +++ b/libcxx/include/__memory/compressed_pair.h @@ -11,161 +11,80 @@ #define _LIBCPP___MEMORY_COMPRESSED_PAIR_H #include <__config> -#include <__fwd/tuple.h> -#include <__tuple/tuple_indices.h> -#include <__type_traits/decay.h> -#include <__type_traits/dependent_type.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_constructible.h> +#include <__type_traits/datasizeof.h> #include <__type_traits/is_empty.h> #include <__type_traits/is_final.h> -#include <__type_traits/is_same.h> -#include <__type_traits/is_swappable.h> -#include <__utility/forward.h> -#include <__utility/move.h> -#include <__utility/piecewise_construct.h> -#include <cstddef> +#include <__type_traits/is_reference.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header #endif -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - _LIBCPP_BEGIN_NAMESPACE_STD -// Tag used to default initialize one or both of the pair's elements. 
-struct __default_init_tag {}; -struct __value_init_tag {}; - -template <class _Tp, int _Idx, bool _CanBeEmptyBase = is_empty<_Tp>::value && !__libcpp_is_final<_Tp>::value> -struct __compressed_pair_elem { - using _ParamT = _Tp; - using reference = _Tp&; - using const_reference = const _Tp&; - - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __compressed_pair_elem(__default_init_tag) {} - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __compressed_pair_elem(__value_init_tag) : __value_() {} - - template <class _Up, __enable_if_t<!is_same<__compressed_pair_elem, __decay_t<_Up> >::value, int> = 0> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __compressed_pair_elem(_Up&& __u) - : __value_(std::forward<_Up>(__u)) {} - -#ifndef _LIBCPP_CXX03_LANG - template <class... _Args, size_t... _Indices> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 explicit __compressed_pair_elem( - piecewise_construct_t, tuple<_Args...> __args, __tuple_indices<_Indices...>) - : __value_(std::forward<_Args>(std::get<_Indices>(__args))...) {} -#endif - - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 reference __get() _NOEXCEPT { return __value_; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR const_reference __get() const _NOEXCEPT { return __value_; } - -private: - _Tp __value_; -}; - -template <class _Tp, int _Idx> -struct __compressed_pair_elem<_Tp, _Idx, true> : private _Tp { - using _ParamT = _Tp; - using reference = _Tp&; - using const_reference = const _Tp&; - using __value_type = _Tp; - - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __compressed_pair_elem() = default; - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __compressed_pair_elem(__default_init_tag) {} - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __compressed_pair_elem(__value_init_tag) : __value_type() {} - - template <class _Up, __enable_if_t<!is_same<__compressed_pair_elem, __decay_t<_Up> >::value, int> = 0> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __compressed_pair_elem(_Up&& __u) - : __value_type(std::forward<_Up>(__u)) {} - -#ifndef _LIBCPP_CXX03_LANG - template <class... _Args, size_t... _Indices> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 - __compressed_pair_elem(piecewise_construct_t, tuple<_Args...> __args, __tuple_indices<_Indices...>) - : __value_type(std::forward<_Args>(std::get<_Indices>(__args))...) {} -#endif - - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 reference __get() _NOEXCEPT { return *this; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR const_reference __get() const _NOEXCEPT { return *this; } -}; +// ================================================================================================================== // +// The utilities here are for staying ABI compatible with the legacy `__compressed_pair`. They should not be used // +// for new data structures. Use `_LIBCPP_NO_UNIQUE_ADDRESS` for new data structures instead (but make sure you // +// understand how it works). // +// ================================================================================================================== // -template <class _T1, class _T2> -class __compressed_pair : private __compressed_pair_elem<_T1, 0>, private __compressed_pair_elem<_T2, 1> { -public: - // NOTE: This static assert should never fire because __compressed_pair - // is *almost never* used in a scenario where it's possible for T1 == T2. - // (The exception is std::function where it is possible that the function - // object and the allocator have the same type).
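For reference, the `__compressed_pair_elem` deleted above implemented the compression through a boolean template parameter: an empty, non-final type is stored as a private base class (the empty-base optimization) and therefore contributes no bytes, while anything else is held as an ordinary member; the `_Idx` parameter only keeps the two element bases distinct types. A stripped-down, hypothetical rendering of that dispatch (illustration only, not the libc++ source):

// Hypothetical reduction of the deleted dispatch. Empty, non-final types
// become a private base and occupy no storage; all other types are held
// as an ordinary member. Compile with -std=c++17.
#include <type_traits>

template <class T, bool = std::is_empty_v<T> && !std::is_final_v<T>>
struct pair_elem { // general case: store an ordinary member
  T value;
  T& get() noexcept { return value; }
};

template <class T>
struct pair_elem<T, true> : private T { // compressed case: empty private base
  T& get() noexcept { return *this; }
};

struct empty_deleter {
  void operator()(int*) const noexcept {}
};

static_assert(sizeof(pair_elem<empty_deleter>) == 1, "base contributes no storage");
static_assert(sizeof(pair_elem<int*>) == sizeof(int*), "member stored normally");

int main() {}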
- static_assert( - (!is_same<_T1, _T2>::value), - "__compressed_pair cannot be instantiated when T1 and T2 are the same type; " - "The current implementation is NOT ABI-compatible with the previous implementation for this configuration"); - - using _Base1 _LIBCPP_NODEBUG = __compressed_pair_elem<_T1, 0>; - using _Base2 _LIBCPP_NODEBUG = __compressed_pair_elem<_T2, 1>; - - template <bool _Dummy = true, - __enable_if_t< __dependent_type<is_default_constructible<_T1>, _Dummy>::value && - __dependent_type<is_default_constructible<_T2>, _Dummy>::value, - int> = 0> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __compressed_pair() - : _Base1(__value_init_tag()), _Base2(__value_init_tag()) {} - - template <class _U1, class _U2> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __compressed_pair(_U1&& __t1, _U2&& __t2) - : _Base1(std::forward<_U1>(__t1)), _Base2(std::forward<_U2>(__t2)) {} - -#ifndef _LIBCPP_CXX03_LANG - template <class... _Args1, class... _Args2> - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 explicit __compressed_pair( - piecewise_construct_t __pc, tuple<_Args1...> __first_args, tuple<_Args2...> __second_args) - : _Base1(__pc, std::move(__first_args), typename __make_tuple_indices<sizeof...(_Args1)>::type()), - _Base2(__pc, std::move(__second_args), typename __make_tuple_indices<sizeof...(_Args2)>::type()) {} -#endif - - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename _Base1::reference first() _NOEXCEPT { - return static_cast<_Base1&>(*this).__get(); - } - - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR typename _Base1::const_reference first() const _NOEXCEPT { - return static_cast<_Base1 const&>(*this).__get(); - } - - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename _Base2::reference second() _NOEXCEPT { - return static_cast<_Base2&>(*this).__get(); - } - - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR typename _Base2::const_reference second() const _NOEXCEPT { - return static_cast<_Base2 const&>(*this).__get(); - } - - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR static _Base1* __get_first_base(__compressed_pair* __pair) _NOEXCEPT { - return static_cast<_Base1*>(__pair); - } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR static _Base2* __get_second_base(__compressed_pair* __pair) _NOEXCEPT { - return static_cast<_Base2*>(__pair); - } - - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 void swap(__compressed_pair& __x) - _NOEXCEPT_(__is_nothrow_swappable_v<_T1>&& __is_nothrow_swappable_v<_T2>) { - using std::swap; - swap(first(), __x.first()); - swap(second(), __x.second()); - } +// The first member is aligned to the alignment of the second member to force padding in front of the compressed pair +// in case there are members before it. +// +// For example: +// (assuming x86-64 linux) +// class SomeClass { +// uint32_t member1; +// _LIBCPP_COMPRESSED_PAIR(uint32_t, member2, uint64_t, member3); +// } +// +// The layout with __compressed_pair is: +// member1 - offset: 0, size: 4 +// padding - offset: 4, size: 4 +// member2 - offset: 8, size: 4 +// padding - offset: 12, size: 4 +// member3 - offset: 16, size: 8 +// +// If the [[gnu::aligned]] wasn't there, the layout would instead be: +// member1 - offset: 0, size: 4 +// member2 - offset: 4, size: 4 +// member3 - offset: 8, size: 8 + +#ifndef _LIBCPP_ABI_NO_COMPRESSED_PAIR_PADDING + +template <class _ToPad> +class __compressed_pair_padding { + char __padding_[((is_empty<_ToPad>::value && !__libcpp_is_final<_ToPad>::value) || is_reference<_ToPad>::value) + ? 
0 + : sizeof(_ToPad) - __datasizeof_v<_ToPad>]; }; -template <class _T1, class _T2> -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 void -swap(__compressed_pair<_T1, _T2>& __x, __compressed_pair<_T1, _T2>& __y) - _NOEXCEPT_(__is_nothrow_swappable_v<_T1>&& __is_nothrow_swappable_v<_T2>) { - __x.swap(__y); -} +# define _LIBCPP_COMPRESSED_PAIR(T1, Initializer1, T2, Initializer2) \ + _LIBCPP_NO_UNIQUE_ADDRESS __attribute__((__aligned__(_LIBCPP_ALIGNOF(T2)))) T1 Initializer1; \ + _LIBCPP_NO_UNIQUE_ADDRESS ::std::__compressed_pair_padding<T1> _LIBCPP_CONCAT3(__padding1_, __LINE__, _); \ + _LIBCPP_NO_UNIQUE_ADDRESS T2 Initializer2; \ + _LIBCPP_NO_UNIQUE_ADDRESS ::std::__compressed_pair_padding<T2> _LIBCPP_CONCAT3(__padding2_, __LINE__, _) + +# define _LIBCPP_COMPRESSED_TRIPLE(T1, Initializer1, T2, Initializer2, T3, Initializer3) \ + _LIBCPP_NO_UNIQUE_ADDRESS \ + __attribute__((__aligned__(_LIBCPP_ALIGNOF(T2)), __aligned__(_LIBCPP_ALIGNOF(T3)))) T1 Initializer1; \ + _LIBCPP_NO_UNIQUE_ADDRESS ::std::__compressed_pair_padding<T1> _LIBCPP_CONCAT3(__padding1_, __LINE__, _); \ + _LIBCPP_NO_UNIQUE_ADDRESS T2 Initializer2; \ + _LIBCPP_NO_UNIQUE_ADDRESS ::std::__compressed_pair_padding<T2> _LIBCPP_CONCAT3(__padding2_, __LINE__, _); \ + _LIBCPP_NO_UNIQUE_ADDRESS T3 Initializer3; \ + _LIBCPP_NO_UNIQUE_ADDRESS ::std::__compressed_pair_padding<T3> _LIBCPP_CONCAT3(__padding3_, __LINE__, _) + +#else +# define _LIBCPP_COMPRESSED_PAIR(T1, Name1, T2, Name2) \ + _LIBCPP_NO_UNIQUE_ADDRESS T1 Name1; \ + _LIBCPP_NO_UNIQUE_ADDRESS T2 Name2 + +# define _LIBCPP_COMPRESSED_TRIPLE(T1, Name1, T2, Name2, T3, Name3) \ + _LIBCPP_NO_UNIQUE_ADDRESS T1 Name1; \ + _LIBCPP_NO_UNIQUE_ADDRESS T2 Name2; \ + _LIBCPP_NO_UNIQUE_ADDRESS T3 Name3 +#endif // _LIBCPP_ABI_NO_COMPRESSED_PAIR_PADDING _LIBCPP_END_NAMESPACE_STD -_LIBCPP_POP_MACROS - #endif // _LIBCPP___MEMORY_COMPRESSED_PAIR_H diff --git a/libcxx/include/__memory/shared_ptr.h b/libcxx/include/__memory/shared_ptr.h index 5dcd475..70964e6 100644 --- a/libcxx/include/__memory/shared_ptr.h +++ b/libcxx/include/__memory/shared_ptr.h @@ -200,11 +200,11 @@ private: template <class _Tp, class _Dp, class _Alloc> class __shared_ptr_pointer : public __shared_weak_count { - __compressed_pair<__compressed_pair<_Tp, _Dp>, _Alloc> __data_; + _LIBCPP_COMPRESSED_TRIPLE(_Tp, __ptr_, _Dp, __deleter_, _Alloc, __alloc_); public: _LIBCPP_HIDE_FROM_ABI __shared_ptr_pointer(_Tp __p, _Dp __d, _Alloc __a) - : __data_(__compressed_pair<_Tp, _Dp>(__p, std::move(__d)), std::move(__a)) {} + : __ptr_(__p), __deleter_(std::move(__d)), __alloc_(std::move(__a)) {} #ifndef _LIBCPP_HAS_NO_RTTI _LIBCPP_HIDE_FROM_ABI_VIRTUAL const void* __get_deleter(const type_info&) const _NOEXCEPT override; @@ -219,15 +219,15 @@ private: template <class _Tp, class _Dp, class _Alloc> const void* __shared_ptr_pointer<_Tp, _Dp, _Alloc>::__get_deleter(const type_info& __t) const _NOEXCEPT { - return __t == typeid(_Dp) ? std::addressof(__data_.first().second()) : nullptr; + return __t == typeid(_Dp) ? 
std::addressof(__deleter_) : nullptr; } #endif // _LIBCPP_HAS_NO_RTTI template <class _Tp, class _Dp, class _Alloc> void __shared_ptr_pointer<_Tp, _Dp, _Alloc>::__on_zero_shared() _NOEXCEPT { - __data_.first().second()(__data_.first().first()); - __data_.first().second().~_Dp(); + __deleter_(__ptr_); + __deleter_.~_Dp(); } template <class _Tp, class _Dp, class _Alloc> @@ -236,8 +236,8 @@ void __shared_ptr_pointer<_Tp, _Dp, _Alloc>::__on_zero_shared_weak() _NOEXCEPT { typedef allocator_traits<_Al> _ATraits; typedef pointer_traits<typename _ATraits::pointer> _PTraits; - _Al __a(__data_.second()); - __data_.second().~_Alloc(); + _Al __a(__alloc_); + __alloc_.~_Alloc(); __a.deallocate(_PTraits::pointer_to(*this), 1); } @@ -295,36 +295,28 @@ private: allocator_traits<_ControlBlockAlloc>::deallocate(__tmp, pointer_traits<_ControlBlockPointer>::pointer_to(*this), 1); } + // TODO: It should be possible to refactor this to remove `_Storage` entirely. // This class implements the control block for non-array shared pointers created // through `std::allocate_shared` and `std::make_shared`. - // - // In previous versions of the library, we used a compressed pair to store - // both the _Alloc and the _Tp. This implies using EBO, which is incompatible - // with Allocator construction for _Tp. To allow implementing P0674 in C++20, - // we now use a properly aligned char buffer while making sure that we maintain - // the same layout that we had when we used a compressed pair. - using _CompressedPair = __compressed_pair<_Alloc, _Tp>; - struct _ALIGNAS_TYPE(_CompressedPair) _Storage { - char __blob_[sizeof(_CompressedPair)]; + struct _Storage { + struct _Data { + _LIBCPP_COMPRESSED_PAIR(_Alloc, __alloc_, _Tp, __elem_); + }; + + _ALIGNAS_TYPE(_Data) char __buffer_[sizeof(_Data)]; _LIBCPP_HIDE_FROM_ABI explicit _Storage(_Alloc&& __a) { ::new ((void*)__get_alloc()) _Alloc(std::move(__a)); } _LIBCPP_HIDE_FROM_ABI ~_Storage() { __get_alloc()->~_Alloc(); } + _LIBCPP_HIDE_FROM_ABI _Alloc* __get_alloc() _NOEXCEPT { - _CompressedPair* __as_pair = reinterpret_cast<_CompressedPair*>(__blob_); - typename _CompressedPair::_Base1* __first = _CompressedPair::__get_first_base(__as_pair); - _Alloc* __alloc = reinterpret_cast<_Alloc*>(__first); - return __alloc; + return std::addressof(reinterpret_cast<_Data*>(__buffer_)->__alloc_); } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_NO_CFI _Tp* __get_elem() _NOEXCEPT { - _CompressedPair* __as_pair = reinterpret_cast<_CompressedPair*>(__blob_); - typename _CompressedPair::_Base2* __second = _CompressedPair::__get_second_base(__as_pair); - _Tp* __elem = reinterpret_cast<_Tp*>(__second); - return __elem; + return std::addressof(reinterpret_cast<_Data*>(__buffer_)->__elem_); } }; - static_assert(_LIBCPP_ALIGNOF(_Storage) == _LIBCPP_ALIGNOF(_CompressedPair), ""); - static_assert(sizeof(_Storage) == sizeof(_CompressedPair), ""); _Storage __storage_; }; diff --git a/libcxx/include/__memory/temporary_buffer.h b/libcxx/include/__memory/temporary_buffer.h index 633c9dc..219e03f 100644 --- a/libcxx/include/__memory/temporary_buffer.h +++ b/libcxx/include/__memory/temporary_buffer.h @@ -11,6 +11,7 @@ #define _LIBCPP___MEMORY_TEMPORARY_BUFFER_H #include <__config> +#include <__memory/unique_temporary_buffer.h> #include <__utility/pair.h> #include <cstddef> #include <new> @@ -19,57 +20,27 @@ # pragma GCC system_header #endif +#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_ENABLE_CXX20_REMOVED_TEMPORARY_BUFFER) + _LIBCPP_BEGIN_NAMESPACE_STD template <class _Tp> [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI 
_LIBCPP_NO_CFI _LIBCPP_DEPRECATED_IN_CXX17 pair<_Tp*, ptrdiff_t> get_temporary_buffer(ptrdiff_t __n) _NOEXCEPT { - pair<_Tp*, ptrdiff_t> __r(0, 0); - const ptrdiff_t __m = - (~ptrdiff_t(0) ^ ptrdiff_t(ptrdiff_t(1) << (sizeof(ptrdiff_t) * __CHAR_BIT__ - 1))) / sizeof(_Tp); - if (__n > __m) - __n = __m; - while (__n > 0) { -#if !defined(_LIBCPP_HAS_NO_ALIGNED_ALLOCATION) - if (__is_overaligned_for_new(_LIBCPP_ALIGNOF(_Tp))) { - align_val_t __al = align_val_t(_LIBCPP_ALIGNOF(_Tp)); - __r.first = static_cast<_Tp*>(::operator new(__n * sizeof(_Tp), __al, nothrow)); - } else { - __r.first = static_cast<_Tp*>(::operator new(__n * sizeof(_Tp), nothrow)); - } -#else - if (__is_overaligned_for_new(_LIBCPP_ALIGNOF(_Tp))) { - // Since aligned operator new is unavailable, return an empty - // buffer rather than one with invalid alignment. - return __r; - } - - __r.first = static_cast<_Tp*>(::operator new(__n * sizeof(_Tp), nothrow)); -#endif - - if (__r.first) { - __r.second = __n; - break; - } - __n /= 2; - } - return __r; + __unique_temporary_buffer<_Tp> __unique_buf = std::__allocate_unique_temporary_buffer<_Tp>(__n); + pair<_Tp*, ptrdiff_t> __result(__unique_buf.get(), __unique_buf.get_deleter().__count_); + __unique_buf.release(); + return __result; } template <class _Tp> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_DEPRECATED_IN_CXX17 void return_temporary_buffer(_Tp* __p) _NOEXCEPT { - std::__libcpp_deallocate_unsized((void*)__p, _LIBCPP_ALIGNOF(_Tp)); + __unique_temporary_buffer<_Tp> __unique_buf(__p); + (void)__unique_buf; } -struct __return_temporary_buffer { - _LIBCPP_SUPPRESS_DEPRECATED_PUSH - template <class _Tp> - _LIBCPP_HIDE_FROM_ABI void operator()(_Tp* __p) const { - std::return_temporary_buffer(__p); - } - _LIBCPP_SUPPRESS_DEPRECATED_POP -}; - _LIBCPP_END_NAMESPACE_STD +#endif // _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_ENABLE_CXX20_REMOVED_TEMPORARY_BUFFER) + #endif // _LIBCPP___MEMORY_TEMPORARY_BUFFER_H diff --git a/libcxx/include/__memory/unique_ptr.h b/libcxx/include/__memory/unique_ptr.h index 392cf42..9ca13d0 100644 --- a/libcxx/include/__memory/unique_ptr.h +++ b/libcxx/include/__memory/unique_ptr.h @@ -144,7 +144,7 @@ public: void>; private: - __compressed_pair<pointer, deleter_type> __ptr_; + _LIBCPP_COMPRESSED_PAIR(pointer, __ptr_, deleter_type, __deleter_); typedef _LIBCPP_NODEBUG __unique_ptr_deleter_sfinae<_Dp> _DeleterSFINAE; @@ -178,23 +178,25 @@ private: public: template <bool _Dummy = true, class = _EnableIfDeleterDefaultConstructible<_Dummy> > - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR unique_ptr() _NOEXCEPT : __ptr_(__value_init_tag(), __value_init_tag()) {} + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR unique_ptr() _NOEXCEPT : __ptr_(), __deleter_() {} template <bool _Dummy = true, class = _EnableIfDeleterDefaultConstructible<_Dummy> > - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR unique_ptr(nullptr_t) _NOEXCEPT - : __ptr_(__value_init_tag(), __value_init_tag()) {} + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR unique_ptr(nullptr_t) _NOEXCEPT : __ptr_(), __deleter_() {} template <bool _Dummy = true, class = _EnableIfDeleterDefaultConstructible<_Dummy> > - _LIBCPP_HIDE_FROM_ABI - _LIBCPP_CONSTEXPR_SINCE_CXX23 explicit unique_ptr(pointer __p) _NOEXCEPT : __ptr_(__p, __value_init_tag()) {} + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 explicit unique_ptr(pointer __p) _NOEXCEPT + : __ptr_(__p), + __deleter_() {} template <bool _Dummy = true, class = _EnableIfDeleterConstructible<_LValRefType<_Dummy> > > _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unique_ptr(pointer __p, 
_LValRefType<_Dummy> __d) _NOEXCEPT - : __ptr_(__p, __d) {} + : __ptr_(__p), + __deleter_(__d) {} template <bool _Dummy = true, class = _EnableIfDeleterConstructible<_GoodRValRefType<_Dummy> > > _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unique_ptr(pointer __p, _GoodRValRefType<_Dummy> __d) _NOEXCEPT - : __ptr_(__p, std::move(__d)) { + : __ptr_(__p), + __deleter_(std::move(__d)) { static_assert(!is_reference<deleter_type>::value, "rvalue deleter bound to reference"); } @@ -202,24 +204,26 @@ public: _LIBCPP_HIDE_FROM_ABI unique_ptr(pointer __p, _BadRValRefType<_Dummy> __d) = delete; _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unique_ptr(unique_ptr&& __u) _NOEXCEPT - : __ptr_(__u.release(), std::forward<deleter_type>(__u.get_deleter())) {} + : __ptr_(__u.release()), + __deleter_(std::forward<deleter_type>(__u.get_deleter())) {} template <class _Up, class _Ep, class = _EnableIfMoveConvertible<unique_ptr<_Up, _Ep>, _Up>, class = _EnableIfDeleterConvertible<_Ep> > _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unique_ptr(unique_ptr<_Up, _Ep>&& __u) _NOEXCEPT - : __ptr_(__u.release(), std::forward<_Ep>(__u.get_deleter())) {} + : __ptr_(__u.release()), + __deleter_(std::forward<_Ep>(__u.get_deleter())) {} #if _LIBCPP_STD_VER <= 14 || defined(_LIBCPP_ENABLE_CXX17_REMOVED_AUTO_PTR) template <class _Up, __enable_if_t<is_convertible<_Up*, _Tp*>::value && is_same<_Dp, default_delete<_Tp> >::value, int> = 0> - _LIBCPP_HIDE_FROM_ABI unique_ptr(auto_ptr<_Up>&& __p) _NOEXCEPT : __ptr_(__p.release(), __value_init_tag()) {} + _LIBCPP_HIDE_FROM_ABI unique_ptr(auto_ptr<_Up>&& __p) _NOEXCEPT : __ptr_(__p.release()), __deleter_() {} #endif _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unique_ptr& operator=(unique_ptr&& __u) _NOEXCEPT { reset(__u.release()); - __ptr_.second() = std::forward<deleter_type>(__u.get_deleter()); + __deleter_ = std::forward<deleter_type>(__u.get_deleter()); return *this; } @@ -229,7 +233,7 @@ public: class = _EnableIfDeleterAssignable<_Ep> > _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unique_ptr& operator=(unique_ptr<_Up, _Ep>&& __u) _NOEXCEPT { reset(__u.release()); - __ptr_.second() = std::forward<_Ep>(__u.get_deleter()); + __deleter_ = std::forward<_Ep>(__u.get_deleter()); return *this; } @@ -256,32 +260,36 @@ public: _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 __add_lvalue_reference_t<_Tp> operator*() const _NOEXCEPT_(_NOEXCEPT_(*std::declval<pointer>())) { - return *__ptr_.first(); + return *__ptr_; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 pointer operator->() const _NOEXCEPT { return __ptr_.first(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 pointer get() const _NOEXCEPT { return __ptr_.first(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 deleter_type& get_deleter() _NOEXCEPT { return __ptr_.second(); } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 pointer operator->() const _NOEXCEPT { return __ptr_; } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 pointer get() const _NOEXCEPT { return __ptr_; } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 deleter_type& get_deleter() _NOEXCEPT { return __deleter_; } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 const deleter_type& get_deleter() const _NOEXCEPT { - return __ptr_.second(); + return __deleter_; } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 explicit operator bool() const _NOEXCEPT { - return __ptr_.first() != nullptr; + return __ptr_ != nullptr; } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 pointer 
release() _NOEXCEPT { - pointer __t = __ptr_.first(); - __ptr_.first() = pointer(); + pointer __t = __ptr_; + __ptr_ = pointer(); return __t; } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 void reset(pointer __p = pointer()) _NOEXCEPT { - pointer __tmp = __ptr_.first(); - __ptr_.first() = __p; + pointer __tmp = __ptr_; + __ptr_ = __p; if (__tmp) - __ptr_.second()(__tmp); + __deleter_(__tmp); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 void swap(unique_ptr& __u) _NOEXCEPT { __ptr_.swap(__u.__ptr_); } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 void swap(unique_ptr& __u) _NOEXCEPT { + using std::swap; + swap(__ptr_, __u.__ptr_); + swap(__deleter_, __u.__deleter_); + } }; template <class _Tp, class _Dp> @@ -303,7 +311,7 @@ public: void>; private: - __compressed_pair<pointer, deleter_type> __ptr_; + _LIBCPP_COMPRESSED_PAIR(pointer, __ptr_, deleter_type, __deleter_); template <class _From> struct _CheckArrayPointerConversion : is_same<_From, pointer> {}; @@ -352,42 +360,46 @@ private: public: template <bool _Dummy = true, class = _EnableIfDeleterDefaultConstructible<_Dummy> > - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR unique_ptr() _NOEXCEPT : __ptr_(__value_init_tag(), __value_init_tag()) {} + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR unique_ptr() _NOEXCEPT : __ptr_(), __deleter_() {} template <bool _Dummy = true, class = _EnableIfDeleterDefaultConstructible<_Dummy> > - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR unique_ptr(nullptr_t) _NOEXCEPT - : __ptr_(__value_init_tag(), __value_init_tag()) {} + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR unique_ptr(nullptr_t) _NOEXCEPT : __ptr_(), __deleter_() {} template <class _Pp, bool _Dummy = true, class = _EnableIfDeleterDefaultConstructible<_Dummy>, class = _EnableIfPointerConvertible<_Pp> > - _LIBCPP_HIDE_FROM_ABI - _LIBCPP_CONSTEXPR_SINCE_CXX23 explicit unique_ptr(_Pp __p) _NOEXCEPT : __ptr_(__p, __value_init_tag()) {} + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 explicit unique_ptr(_Pp __p) _NOEXCEPT + : __ptr_(__p), + __deleter_() {} template <class _Pp, bool _Dummy = true, class = _EnableIfDeleterConstructible<_LValRefType<_Dummy> >, class = _EnableIfPointerConvertible<_Pp> > _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unique_ptr(_Pp __p, _LValRefType<_Dummy> __d) _NOEXCEPT - : __ptr_(__p, __d) {} + : __ptr_(__p), + __deleter_(__d) {} template <bool _Dummy = true, class = _EnableIfDeleterConstructible<_LValRefType<_Dummy> > > _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unique_ptr(nullptr_t, _LValRefType<_Dummy> __d) _NOEXCEPT - : __ptr_(nullptr, __d) {} + : __ptr_(nullptr), + __deleter_(__d) {} template <class _Pp, bool _Dummy = true, class = _EnableIfDeleterConstructible<_GoodRValRefType<_Dummy> >, class = _EnableIfPointerConvertible<_Pp> > _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unique_ptr(_Pp __p, _GoodRValRefType<_Dummy> __d) _NOEXCEPT - : __ptr_(__p, std::move(__d)) { + : __ptr_(__p), + __deleter_(std::move(__d)) { static_assert(!is_reference<deleter_type>::value, "rvalue deleter bound to reference"); } template <bool _Dummy = true, class = _EnableIfDeleterConstructible<_GoodRValRefType<_Dummy> > > _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unique_ptr(nullptr_t, _GoodRValRefType<_Dummy> __d) _NOEXCEPT - : __ptr_(nullptr, std::move(__d)) { + : __ptr_(nullptr), + __deleter_(std::move(__d)) { static_assert(!is_reference<deleter_type>::value, "rvalue deleter bound to reference"); } @@ -398,11 +410,12 @@ public: _LIBCPP_HIDE_FROM_ABI unique_ptr(_Pp __p, _BadRValRefType<_Dummy> __d) 
= delete; _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unique_ptr(unique_ptr&& __u) _NOEXCEPT - : __ptr_(__u.release(), std::forward<deleter_type>(__u.get_deleter())) {} + : __ptr_(__u.release()), + __deleter_(std::forward<deleter_type>(__u.get_deleter())) {} _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unique_ptr& operator=(unique_ptr&& __u) _NOEXCEPT { reset(__u.release()); - __ptr_.second() = std::forward<deleter_type>(__u.get_deleter()); + __deleter_ = std::forward<deleter_type>(__u.get_deleter()); return *this; } @@ -411,7 +424,8 @@ public: class = _EnableIfMoveConvertible<unique_ptr<_Up, _Ep>, _Up>, class = _EnableIfDeleterConvertible<_Ep> > _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unique_ptr(unique_ptr<_Up, _Ep>&& __u) _NOEXCEPT - : __ptr_(__u.release(), std::forward<_Ep>(__u.get_deleter())) {} + : __ptr_(__u.release()), + __deleter_(std::forward<_Ep>(__u.get_deleter())) {} template <class _Up, class _Ep, @@ -419,7 +433,7 @@ public: class = _EnableIfDeleterAssignable<_Ep> > _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 unique_ptr& operator=(unique_ptr<_Up, _Ep>&& __u) _NOEXCEPT { reset(__u.release()); - __ptr_.second() = std::forward<_Ep>(__u.get_deleter()); + __deleter_ = std::forward<_Ep>(__u.get_deleter()); return *this; } @@ -437,41 +451,45 @@ public: } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 __add_lvalue_reference_t<_Tp> operator[](size_t __i) const { - return __ptr_.first()[__i]; + return __ptr_[__i]; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 pointer get() const _NOEXCEPT { return __ptr_.first(); } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 pointer get() const _NOEXCEPT { return __ptr_; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 deleter_type& get_deleter() _NOEXCEPT { return __ptr_.second(); } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 deleter_type& get_deleter() _NOEXCEPT { return __deleter_; } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 const deleter_type& get_deleter() const _NOEXCEPT { - return __ptr_.second(); + return __deleter_; } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 explicit operator bool() const _NOEXCEPT { - return __ptr_.first() != nullptr; + return __ptr_ != nullptr; } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 pointer release() _NOEXCEPT { - pointer __t = __ptr_.first(); - __ptr_.first() = pointer(); + pointer __t = __ptr_; + __ptr_ = pointer(); return __t; } template <class _Pp, __enable_if_t<_CheckArrayPointerConversion<_Pp>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 void reset(_Pp __p) _NOEXCEPT { - pointer __tmp = __ptr_.first(); - __ptr_.first() = __p; + pointer __tmp = __ptr_; + __ptr_ = __p; if (__tmp) - __ptr_.second()(__tmp); + __deleter_(__tmp); } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 void reset(nullptr_t = nullptr) _NOEXCEPT { - pointer __tmp = __ptr_.first(); - __ptr_.first() = nullptr; + pointer __tmp = __ptr_; + __ptr_ = nullptr; if (__tmp) - __ptr_.second()(__tmp); + __deleter_(__tmp); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 void swap(unique_ptr& __u) _NOEXCEPT { __ptr_.swap(__u.__ptr_); } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 void swap(unique_ptr& __u) _NOEXCEPT { + using std::swap; + swap(__ptr_, __u.__ptr_); + swap(__deleter_, __u.__deleter_); + } }; template <class _Tp, class _Dp, __enable_if_t<__is_swappable_v<_Dp>, int> = 0> diff --git a/libcxx/include/__memory/unique_temporary_buffer.h b/libcxx/include/__memory/unique_temporary_buffer.h new file 
mode 100644 index 0000000..b9e2a47 --- /dev/null +++ b/libcxx/include/__memory/unique_temporary_buffer.h @@ -0,0 +1,92 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___MEMORY_UNIQUE_TEMPORARY_BUFFER_H +#define _LIBCPP___MEMORY_UNIQUE_TEMPORARY_BUFFER_H + +#include <__assert> +#include <__config> + +#include <__memory/allocator.h> +#include <__memory/unique_ptr.h> +#include <__type_traits/is_constant_evaluated.h> +#include <cstddef> +#include <new> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +template <class _Tp> +struct __temporary_buffer_deleter { + ptrdiff_t __count_; // ignored in non-constant evaluation + + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __temporary_buffer_deleter() _NOEXCEPT : __count_(0) {} + _LIBCPP_HIDE_FROM_ABI + _LIBCPP_CONSTEXPR explicit __temporary_buffer_deleter(ptrdiff_t __count) _NOEXCEPT : __count_(__count) {} + + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 void operator()(_Tp* __ptr) _NOEXCEPT { + if (__libcpp_is_constant_evaluated()) { + allocator<_Tp>().deallocate(__ptr, __count_); + return; + } + + std::__libcpp_deallocate_unsized((void*)__ptr, _LIBCPP_ALIGNOF(_Tp)); + } +}; + +template <class _Tp> +using __unique_temporary_buffer = unique_ptr<_Tp, __temporary_buffer_deleter<_Tp> >; + +template <class _Tp> +inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 __unique_temporary_buffer<_Tp> +__allocate_unique_temporary_buffer(ptrdiff_t __count) { + using __deleter_type = __temporary_buffer_deleter<_Tp>; + using __unique_buffer_type = __unique_temporary_buffer<_Tp>; + + if (__libcpp_is_constant_evaluated()) { + return __unique_buffer_type(allocator<_Tp>().allocate(__count), __deleter_type(__count)); + } + + _Tp* __ptr = nullptr; + const ptrdiff_t __max_count = + (~ptrdiff_t(0) ^ ptrdiff_t(ptrdiff_t(1) << (sizeof(ptrdiff_t) * __CHAR_BIT__ - 1))) / sizeof(_Tp); + if (__count > __max_count) + __count = __max_count; + while (__count > 0) { +#if !defined(_LIBCPP_HAS_NO_ALIGNED_ALLOCATION) + if (__is_overaligned_for_new(_LIBCPP_ALIGNOF(_Tp))) { + align_val_t __al = align_val_t(_LIBCPP_ALIGNOF(_Tp)); + __ptr = static_cast<_Tp*>(::operator new(__count * sizeof(_Tp), __al, nothrow)); + } else { + __ptr = static_cast<_Tp*>(::operator new(__count * sizeof(_Tp), nothrow)); + } +#else + if (__is_overaligned_for_new(_LIBCPP_ALIGNOF(_Tp))) { + // Since aligned operator new is unavailable, return an empty buffer rather than one with invalid alignment.
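The surrounding loop halves the requested element count until a nothrow allocation succeeds, and hands ownership back as a `unique_ptr` whose deleter both frees the raw storage and remembers the granted count; the rewritten `get_temporary_buffer` shown earlier simply `release()`s the pointer out of this wrapper. A standalone sketch of the same pattern in plain C++17, with invented names and with the real code's overflow cap, overalignment handling, and constant-evaluation branch omitted:

// Sketch of the RAII temporary-buffer pattern using only public std:: facilities.
#include <cstddef>
#include <memory>
#include <new>

template <class T>
struct buffer_deleter {
  std::ptrdiff_t count = 0; // usable element count, remembered for callers
  void operator()(T* p) const noexcept { ::operator delete(static_cast<void*>(p)); }
};

template <class T>
using unique_buffer = std::unique_ptr<T, buffer_deleter<T>>;

template <class T>
unique_buffer<T> allocate_buffer(std::ptrdiff_t n) {
  T* p = nullptr;
  while (n > 0) {
    // nothrow allocation: shrink the request instead of propagating bad_alloc
    p = static_cast<T*>(::operator new(static_cast<std::size_t>(n) * sizeof(T), std::nothrow));
    if (p)
      break;
    n /= 2;
  }
  return unique_buffer<T>(p, buffer_deleter<T>{n});
}

int main() {
  auto buf = allocate_buffer<int>(1 << 20); // storage freed automatically at scope exit
  return buf ? 0 : 1;
}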
+ return __unique_buffer_type(); + } + + __ptr = static_cast<_Tp*>(::operator new(__count * sizeof(_Tp), nothrow)); +#endif + + if (__ptr) { + break; + } + __count /= 2; + } + + return __unique_buffer_type(__ptr, __deleter_type(__count)); +} + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___MEMORY_UNIQUE_TEMPORARY_BUFFER_H diff --git a/libcxx/include/__ostream/basic_ostream.h b/libcxx/include/__ostream/basic_ostream.h index e0698cc..1f7fe83 100644 --- a/libcxx/include/__ostream/basic_ostream.h +++ b/libcxx/include/__ostream/basic_ostream.h @@ -10,29 +10,32 @@ #define _LIBCPP___OSTREAM_BASIC_OSTREAM_H #include <__config> -#include <__exception/operations.h> -#include <__memory/shared_ptr.h> -#include <__memory/unique_ptr.h> -#include <__system_error/error_code.h> -#include <__type_traits/conjunction.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_base_of.h> -#include <__type_traits/void_t.h> -#include <__utility/declval.h> -#include <bitset> -#include <cstddef> -#include <ios> -#include <locale> -#include <new> // for __throw_bad_alloc -#include <streambuf> -#include <string_view> - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif + +#if !defined(_LIBCPP_HAS_NO_LOCALIZATION) + +# include <__exception/operations.h> +# include <__memory/shared_ptr.h> +# include <__memory/unique_ptr.h> +# include <__system_error/error_code.h> +# include <__type_traits/conjunction.h> +# include <__type_traits/enable_if.h> +# include <__type_traits/is_base_of.h> +# include <__type_traits/void_t.h> +# include <__utility/declval.h> +# include <bitset> +# include <cstddef> +# include <ios> +# include <locale> +# include <new> // for __throw_bad_alloc +# include <streambuf> +# include <string_view> + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif _LIBCPP_PUSH_MACROS -#include <__undef_macros> +# include <__undef_macros> _LIBCPP_BEGIN_NAMESPACE_STD @@ -99,19 +102,19 @@ public: basic_ostream& operator<<(long double __f); basic_ostream& operator<<(const void* __p); -#if _LIBCPP_STD_VER >= 23 +# if _LIBCPP_STD_VER >= 23 _LIBCPP_HIDE_FROM_ABI basic_ostream& operator<<(const volatile void* __p) { return operator<<(const_cast<const void*>(__p)); } -#endif +# endif basic_ostream& operator<<(basic_streambuf<char_type, traits_type>* __sb); -#if _LIBCPP_STD_VER >= 17 +# if _LIBCPP_STD_VER >= 17 // LWG 2221 - nullptr. This is not backported to older standards modes. // See https://reviews.llvm.org/D127033 for more info on the rationale. _LIBCPP_HIDE_FROM_ABI basic_ostream& operator<<(nullptr_t) { return *this << "nullptr"; } -#endif +# endif // 27.7.2.7 Unformatted output: basic_ostream& put(char_type __c); @@ -153,15 +156,15 @@ basic_ostream<_CharT, _Traits>::sentry::sentry(basic_ostream<_CharT, _Traits>& _ template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>::sentry::~sentry() { if (__os_.rdbuf() && __os_.good() && (__os_.flags() & ios_base::unitbuf) && uncaught_exceptions() == 0) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS if (__os_.rdbuf()->pubsync() == -1) __os_.setstate(ios_base::badbit); -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) 
{ } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS } } @@ -182,15 +185,15 @@ basic_ostream<_CharT, _Traits>::~basic_ostream() {} template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(basic_streambuf<char_type, traits_type>* __sb) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { if (__sb) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS typedef istreambuf_iterator<_CharT, _Traits> _Ip; typedef ostreambuf_iterator<_CharT, _Traits> _Op; _Ip __i(__sb); @@ -204,27 +207,27 @@ basic_ostream<_CharT, _Traits>::operator<<(basic_streambuf<char_type, traits_typ } if (__c == 0) this->setstate(ios_base::failbit); -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_failbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS } else this->setstate(ios_base::badbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(bool __n) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { typedef num_put<char_type, ostreambuf_iterator<char_type, traits_type> > _Fp; @@ -232,19 +235,19 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(bool if (__f.put(*this, *this, this->fill(), __n).failed()) this->setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(short __n) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { ios_base::fmtflags __flags = ios_base::flags() & ios_base::basefield; @@ -259,19 +262,19 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(short .failed()) this->setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) 
{ this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(unsigned short __n) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { typedef num_put<char_type, ostreambuf_iterator<char_type, traits_type> > _Fp; @@ -279,19 +282,19 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(unsig if (__f.put(*this, *this, this->fill(), static_cast<unsigned long>(__n)).failed()) this->setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(int __n) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { ios_base::fmtflags __flags = ios_base::flags() & ios_base::basefield; @@ -306,19 +309,19 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(int _ .failed()) this->setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(unsigned int __n) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { typedef num_put<char_type, ostreambuf_iterator<char_type, traits_type> > _Fp; @@ -326,19 +329,19 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(unsig if (__f.put(*this, *this, this->fill(), static_cast<unsigned long>(__n)).failed()) this->setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(long __n) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { typedef num_put<char_type, ostreambuf_iterator<char_type, traits_type> > _Fp; @@ -346,19 +349,19 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(long if (__f.put(*this, *this, this->fill(), __n).failed()) this->setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) 
{ this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(unsigned long __n) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { typedef num_put<char_type, ostreambuf_iterator<char_type, traits_type> > _Fp; @@ -366,19 +369,19 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(unsig if (__f.put(*this, *this, this->fill(), __n).failed()) this->setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(long long __n) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { typedef num_put<char_type, ostreambuf_iterator<char_type, traits_type> > _Fp; @@ -386,19 +389,19 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(long if (__f.put(*this, *this, this->fill(), __n).failed()) this->setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(unsigned long long __n) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { typedef num_put<char_type, ostreambuf_iterator<char_type, traits_type> > _Fp; @@ -406,19 +409,19 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(unsig if (__f.put(*this, *this, this->fill(), __n).failed()) this->setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(float __n) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { typedef num_put<char_type, ostreambuf_iterator<char_type, traits_type> > _Fp; @@ -426,19 +429,19 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(float if (__f.put(*this, *this, this->fill(), static_cast<double>(__n)).failed()) this->setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) 
{ this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(double __n) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { typedef num_put<char_type, ostreambuf_iterator<char_type, traits_type> > _Fp; @@ -446,19 +449,19 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(doubl if (__f.put(*this, *this, this->fill(), __n).failed()) this->setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(long double __n) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { typedef num_put<char_type, ostreambuf_iterator<char_type, traits_type> > _Fp; @@ -466,19 +469,19 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(long if (__f.put(*this, *this, this->fill(), __n).failed()) this->setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(const void* __n) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { typedef num_put<char_type, ostreambuf_iterator<char_type, traits_type> > _Fp; @@ -486,20 +489,20 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::operator<<(const if (__f.put(*this, *this, this->fill(), __n).failed()) this->setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> _LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>& __put_character_sequence(basic_ostream<_CharT, _Traits>& __os, const _CharT* __str, size_t __len) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS typename basic_ostream<_CharT, _Traits>::sentry __s(__os); if (__s) { typedef ostreambuf_iterator<_CharT, _Traits> _Ip; @@ -513,11 +516,11 @@ __put_character_sequence(basic_ostream<_CharT, _Traits>& __os, const _CharT* __s .failed()) __os.setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) 
{ __os.__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return __os; } @@ -528,9 +531,9 @@ _LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>& operator<<(basic_ostream<_ template <class _CharT, class _Traits> _LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>& operator<<(basic_ostream<_CharT, _Traits>& __os, char __cn) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS typename basic_ostream<_CharT, _Traits>::sentry __s(__os); if (__s) { _CharT __c = __os.widen(__cn); @@ -545,11 +548,11 @@ _LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>& operator<<(basic_ostream<_ .failed()) __os.setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __os.__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return __os; } @@ -577,9 +580,9 @@ operator<<(basic_ostream<_CharT, _Traits>& __os, const _CharT* __str) { template <class _CharT, class _Traits> _LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>& operator<<(basic_ostream<_CharT, _Traits>& __os, const char* __strn) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS typename basic_ostream<_CharT, _Traits>::sentry __s(__os); if (__s) { typedef ostreambuf_iterator<_CharT, _Traits> _Ip; @@ -606,11 +609,11 @@ operator<<(basic_ostream<_CharT, _Traits>& __os, const char* __strn) { .failed()) __os.setstate(ios_base::badbit | ios_base::failbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __os.__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return __os; } @@ -635,9 +638,9 @@ operator<<(basic_ostream<char, _Traits>& __os, const unsigned char* __str) { template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::put(char_type __c) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __s(*this); if (__s) { typedef ostreambuf_iterator<_CharT, _Traits> _Op; @@ -646,37 +649,37 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::put(char_type __ if (__o.failed()) this->setstate(ios_base::badbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::write(const char_type* __s, streamsize __n) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS sentry __sen(*this); if (__sen && __n) { if (this->rdbuf()->sputn(__s, __n) != __n) this->setstate(ios_base::badbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) 
{ this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } template <class _CharT, class _Traits> basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::flush() { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS if (this->rdbuf()) { sentry __s(*this); if (__s) { @@ -684,11 +687,11 @@ basic_ostream<_CharT, _Traits>& basic_ostream<_CharT, _Traits>::flush() { this->setstate(ios_base::badbit); } } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return *this; } @@ -797,9 +800,9 @@ operator<<(basic_ostream<_CharT, _Traits>& __os, const bitset<_Size>& __x) { std::use_facet<ctype<_CharT> >(__os.getloc()).widen('1')); } -#if _LIBCPP_STD_VER >= 20 +# if _LIBCPP_STD_VER >= 20 -# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS +# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS template <class _Traits> basic_ostream<char, _Traits>& operator<<(basic_ostream<char, _Traits>&, wchar_t) = delete; @@ -818,9 +821,9 @@ basic_ostream<wchar_t, _Traits>& operator<<(basic_ostream<wchar_t, _Traits>&, co template <class _Traits> basic_ostream<wchar_t, _Traits>& operator<<(basic_ostream<wchar_t, _Traits>&, const char32_t*) = delete; -# endif // _LIBCPP_HAS_NO_WIDE_CHARACTERS +# endif // _LIBCPP_HAS_NO_WIDE_CHARACTERS -# ifndef _LIBCPP_HAS_NO_CHAR8_T +# ifndef _LIBCPP_HAS_NO_CHAR8_T template <class _Traits> basic_ostream<char, _Traits>& operator<<(basic_ostream<char, _Traits>&, char8_t) = delete; @@ -832,7 +835,7 @@ basic_ostream<char, _Traits>& operator<<(basic_ostream<char, _Traits>&, const ch template <class _Traits> basic_ostream<wchar_t, _Traits>& operator<<(basic_ostream<wchar_t, _Traits>&, const char8_t*) = delete; -# endif +# endif template <class _Traits> basic_ostream<char, _Traits>& operator<<(basic_ostream<char, _Traits>&, char16_t) = delete; @@ -846,15 +849,17 @@ basic_ostream<char, _Traits>& operator<<(basic_ostream<char, _Traits>&, const ch template <class _Traits> basic_ostream<char, _Traits>& operator<<(basic_ostream<char, _Traits>&, const char32_t*) = delete; -#endif // _LIBCPP_STD_VER >= 20 +# endif // _LIBCPP_STD_VER >= 20 extern template class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_ostream<char>; -#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS +# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS extern template class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_ostream<wchar_t>; -#endif +# endif _LIBCPP_END_NAMESPACE_STD _LIBCPP_POP_MACROS +#endif // !defined(_LIBCPP_HAS_NO_LOCALIZATION) + #endif // _LIBCPP___OSTREAM_BASIC_OSTREAM_H diff --git a/libcxx/include/__ostream/print.h b/libcxx/include/__ostream/print.h index 8265ac0..6c82b11 100644 --- a/libcxx/include/__ostream/print.h +++ b/libcxx/include/__ostream/print.h @@ -10,21 +10,24 @@ #define _LIBCPP___OSTREAM_PRINT_H #include <__config> -#include <__fwd/ostream.h> -#include <__iterator/ostreambuf_iterator.h> -#include <__ostream/basic_ostream.h> -#include <format> -#include <ios> -#include <locale> -#include <print> - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif + +#if !defined(_LIBCPP_HAS_NO_LOCALIZATION) + +# include <__fwd/ostream.h> +# include <__iterator/ostreambuf_iterator.h> +# include <__ostream/basic_ostream.h> +# include <format> +# include <ios> +# include <locale> +# include <print> + +# if 
!defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif _LIBCPP_BEGIN_NAMESPACE_STD -#if _LIBCPP_STD_VER >= 23 +# if _LIBCPP_STD_VER >= 23 template <class = void> // TODO PRINT template or availability markup fires too eagerly (http://llvm.org/PR61563). _LIBCPP_HIDE_FROM_ABI inline void @@ -49,9 +52,9 @@ __vprint_nonunicode(ostream& __os, string_view __fmt, format_args __args, bool _ const char* __str = __o.data(); size_t __len = __o.size(); -# ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -# endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS typedef ostreambuf_iterator<char> _Ip; if (std::__pad_and_output( _Ip(__os), @@ -63,11 +66,11 @@ __vprint_nonunicode(ostream& __os, string_view __fmt, format_args __args, bool _ .failed()) __os.setstate(ios_base::badbit | ios_base::failbit); -# ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __os.__set_badbit_and_consider_rethrow(); } -# endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS } } @@ -91,12 +94,12 @@ _LIBCPP_HIDE_FROM_ABI inline void vprint_nonunicode(ostream& __os, string_view _ // is determined in the same way as the print(FILE*, ...) overloads. _LIBCPP_EXPORTED_FROM_ABI FILE* __get_ostream_file(ostream& __os); -# ifndef _LIBCPP_HAS_NO_UNICODE +# ifndef _LIBCPP_HAS_NO_UNICODE template <class = void> // TODO PRINT template or availability markup fires too eagerly (http://llvm.org/PR61563). _LIBCPP_HIDE_FROM_ABI void __vprint_unicode(ostream& __os, string_view __fmt, format_args __args, bool __write_nl) { -# if _LIBCPP_AVAILABILITY_HAS_PRINT == 0 +# if _LIBCPP_AVAILABILITY_HAS_PRINT == 0 return std::__vprint_nonunicode(__os, __fmt, __args, __write_nl); -# else +# else FILE* __file = std::__get_ostream_file(__os); if (!__file || !__print::__is_terminal(__file)) return std::__vprint_nonunicode(__os, __fmt, __args, __write_nl); @@ -112,49 +115,49 @@ _LIBCPP_HIDE_FROM_ABI void __vprint_unicode(ostream& __os, string_view __fmt, fo // This is the path for the native API, start with flushing. __os.flush(); -# ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -# endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS ostream::sentry __s(__os); if (__s) { -# ifndef _LIBCPP_WIN32API +# ifndef _LIBCPP_WIN32API __print::__vprint_unicode_posix(__file, __fmt, __args, __write_nl, true); -# elif !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS) +# elif !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS) __print::__vprint_unicode_windows(__file, __fmt, __args, __write_nl, true); -# else -# error "Windows builds with wchar_t disabled are not supported." -# endif +# else +# error "Windows builds with wchar_t disabled are not supported." +# endif } -# ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __os.__set_badbit_and_consider_rethrow(); } -# endif // _LIBCPP_HAS_NO_EXCEPTIONS -# endif // _LIBCPP_AVAILABILITY_HAS_PRINT +# endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_AVAILABILITY_HAS_PRINT } template <class = void> // TODO PRINT template or availability markup fires too eagerly (http://llvm.org/PR61563). _LIBCPP_HIDE_FROM_ABI inline void vprint_unicode(ostream& __os, string_view __fmt, format_args __args) { std::__vprint_unicode(__os, __fmt, __args, false); } -# endif // _LIBCPP_HAS_NO_UNICODE +# endif // _LIBCPP_HAS_NO_UNICODE template <class... _Args> _LIBCPP_HIDE_FROM_ABI void print(ostream& __os, format_string<_Args...> __fmt, _Args&&... 
__args) { -# ifndef _LIBCPP_HAS_NO_UNICODE +# ifndef _LIBCPP_HAS_NO_UNICODE if constexpr (__print::__use_unicode_execution_charset) std::__vprint_unicode(__os, __fmt.get(), std::make_format_args(__args...), false); else std::__vprint_nonunicode(__os, __fmt.get(), std::make_format_args(__args...), false); -# else // _LIBCPP_HAS_NO_UNICODE +# else // _LIBCPP_HAS_NO_UNICODE std::__vprint_nonunicode(__os, __fmt.get(), std::make_format_args(__args...), false); -# endif // _LIBCPP_HAS_NO_UNICODE +# endif // _LIBCPP_HAS_NO_UNICODE } template <class... _Args> _LIBCPP_HIDE_FROM_ABI void println(ostream& __os, format_string<_Args...> __fmt, _Args&&... __args) { -# ifndef _LIBCPP_HAS_NO_UNICODE +# ifndef _LIBCPP_HAS_NO_UNICODE // Note the wording in the Standard is inefficient. The output of // std::format is a std::string which is then copied. This solution // just appends a newline at the end of the output. @@ -162,9 +165,9 @@ _LIBCPP_HIDE_FROM_ABI void println(ostream& __os, format_string<_Args...> __fmt, std::__vprint_unicode(__os, __fmt.get(), std::make_format_args(__args...), true); else std::__vprint_nonunicode(__os, __fmt.get(), std::make_format_args(__args...), true); -# else // _LIBCPP_HAS_NO_UNICODE +# else // _LIBCPP_HAS_NO_UNICODE std::__vprint_nonunicode(__os, __fmt.get(), std::make_format_args(__args...), true); -# endif // _LIBCPP_HAS_NO_UNICODE +# endif // _LIBCPP_HAS_NO_UNICODE } template <class = void> // TODO PRINT template or availability markup fires too eagerly (http://llvm.org/PR61563). @@ -172,8 +175,10 @@ _LIBCPP_HIDE_FROM_ABI inline void println(ostream& __os) { std::print(__os, "\n"); } -#endif // _LIBCPP_STD_VER >= 23 +# endif // _LIBCPP_STD_VER >= 23 _LIBCPP_END_NAMESPACE_STD +#endif // !defined(_LIBCPP_HAS_NO_LOCALIZATION) + #endif // _LIBCPP___OSTREAM_PRINT_H diff --git a/libcxx/include/__split_buffer b/libcxx/include/__split_buffer index bab724d..7916769 100644 --- a/libcxx/include/__split_buffer +++ b/libcxx/include/__split_buffer @@ -78,7 +78,7 @@ public: pointer __first_; pointer __begin_; pointer __end_; - __compressed_pair<pointer, allocator_type> __end_cap_; + _LIBCPP_COMPRESSED_PAIR(pointer, __end_cap_, allocator_type, __alloc_); using __alloc_ref = __add_lvalue_reference_t<allocator_type>; using __alloc_const_ref = __add_lvalue_reference_t<allocator_type>; @@ -88,13 +88,13 @@ public: _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI __split_buffer() _NOEXCEPT_(is_nothrow_default_constructible<allocator_type>::value) - : __first_(nullptr), __begin_(nullptr), __end_(nullptr), __end_cap_(nullptr, __default_init_tag()) {} + : __first_(nullptr), __begin_(nullptr), __end_(nullptr), __end_cap_(nullptr) {} _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI explicit __split_buffer(__alloc_rr& __a) - : __first_(nullptr), __begin_(nullptr), __end_(nullptr), __end_cap_(nullptr, __a) {} + : __first_(nullptr), __begin_(nullptr), __end_(nullptr), __end_cap_(nullptr), __alloc_(__a) {} _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI explicit __split_buffer(const __alloc_rr& __a) - : __first_(nullptr), __begin_(nullptr), __end_(nullptr), __end_cap_(nullptr, __a) {} + : __first_(nullptr), __begin_(nullptr), __end_(nullptr), __end_cap_(nullptr), __alloc_(__a) {} _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI __split_buffer(size_type __cap, size_type __start, __alloc_rr& __a); @@ -111,15 +111,11 @@ public: _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI ~__split_buffer(); - _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI __alloc_rr& __alloc() _NOEXCEPT { return 
__end_cap_.second(); } - _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI const __alloc_rr& __alloc() const _NOEXCEPT { - return __end_cap_.second(); - } + _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI __alloc_rr& __alloc() _NOEXCEPT { return __alloc_; } + _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI const __alloc_rr& __alloc() const _NOEXCEPT { return __alloc_; } - _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI pointer& __end_cap() _NOEXCEPT { return __end_cap_.first(); } - _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI const pointer& __end_cap() const _NOEXCEPT { - return __end_cap_.first(); - } + _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI pointer& __end_cap() _NOEXCEPT { return __end_cap_; } + _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI const pointer& __end_cap() const _NOEXCEPT { return __end_cap_; } _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI iterator begin() _NOEXCEPT { return __begin_; } _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI const_iterator begin() const _NOEXCEPT { return __begin_; } @@ -346,7 +342,7 @@ __split_buffer<_Tp, _Allocator>::__destruct_at_end(pointer __new_last, true_type template <class _Tp, class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 __split_buffer<_Tp, _Allocator>::__split_buffer(size_type __cap, size_type __start, __alloc_rr& __a) - : __end_cap_(nullptr, __a) { + : __end_cap_(nullptr), __alloc_(__a) { if (__cap == 0) { __first_ = nullptr; } else { @@ -371,7 +367,8 @@ _LIBCPP_CONSTEXPR_SINCE_CXX20 __split_buffer<_Tp, _Allocator>::__split_buffer(__ : __first_(std::move(__c.__first_)), __begin_(std::move(__c.__begin_)), __end_(std::move(__c.__end_)), - __end_cap_(std::move(__c.__end_cap_)) { + __end_cap_(std::move(__c.__end_cap_)), + __alloc_(std::move(__c.__alloc_)) { __c.__first_ = nullptr; __c.__begin_ = nullptr; __c.__end_ = nullptr; @@ -381,7 +378,7 @@ _LIBCPP_CONSTEXPR_SINCE_CXX20 __split_buffer<_Tp, _Allocator>::__split_buffer(__ template <class _Tp, class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 __split_buffer<_Tp, _Allocator>::__split_buffer(__split_buffer&& __c, const __alloc_rr& __a) - : __end_cap_(nullptr, __a) { + : __end_cap_(nullptr), __alloc_(__a) { if (__a == __c.__alloc()) { __first_ = __c.__first_; __begin_ = __c.__begin_; diff --git a/libcxx/include/__std_clang_module b/libcxx/include/__std_clang_module index 5725286..a21ed26 100644 --- a/libcxx/include/__std_clang_module +++ b/libcxx/include/__std_clang_module @@ -73,12 +73,8 @@ #include <ctime> #include <ctype.h> #include <cuchar> -#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS) -# include <cwchar> -#endif -#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS) -# include <cwctype> -#endif +#include <cwchar> +#include <cwctype> #include <deque> #include <errno.h> #include <exception> @@ -193,9 +189,5 @@ #include <variant> #include <vector> #include <version> -#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS) -# include <wchar.h> -#endif -#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS) -# include <wctype.h> -#endif +#include <wchar.h> +#include <wctype.h> diff --git a/libcxx/include/__thread/jthread.h b/libcxx/include/__thread/jthread.h index d85ad3b..8fac72f 100644 --- a/libcxx/include/__thread/jthread.h +++ b/libcxx/include/__thread/jthread.h @@ -30,7 +30,7 @@ _LIBCPP_PUSH_MACROS #include <__undef_macros> -#if _LIBCPP_STD_VER >= 20 +#if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS) _LIBCPP_BEGIN_NAMESPACE_STD @@ -127,7 +127,7 @@ private: _LIBCPP_END_NAMESPACE_STD -#endif // _LIBCPP_STD_VER >= 20 +#endif // _LIBCPP_STD_VER >= 20 && 
!defined(_LIBCPP_HAS_NO_THREADS) _LIBCPP_POP_MACROS diff --git a/libcxx/include/__thread/this_thread.h b/libcxx/include/__thread/this_thread.h index de7eea2..2318a9b 100644 --- a/libcxx/include/__thread/this_thread.h +++ b/libcxx/include/__thread/this_thread.h @@ -29,6 +29,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD namespace this_thread { +#ifndef _LIBCPP_HAS_NO_THREADS + _LIBCPP_EXPORTED_FROM_ABI void sleep_for(const chrono::nanoseconds& __ns); template <class _Rep, class _Period> @@ -65,6 +67,8 @@ inline _LIBCPP_HIDE_FROM_ABI void sleep_until(const chrono::time_point<chrono::s inline _LIBCPP_HIDE_FROM_ABI void yield() _NOEXCEPT { __libcpp_thread_yield(); } +#endif // !_LIBCPP_HAS_NO_THREADS + } // namespace this_thread _LIBCPP_END_NAMESPACE_STD diff --git a/libcxx/include/__thread/thread.h b/libcxx/include/__thread/thread.h index 458c1cd..96de12e 100644 --- a/libcxx/include/__thread/thread.h +++ b/libcxx/include/__thread/thread.h @@ -38,6 +38,8 @@ _LIBCPP_PUSH_MACROS _LIBCPP_BEGIN_NAMESPACE_STD +#if !defined(_LIBCPP_HAS_NO_THREADS) + template <class _Tp> class __thread_specific_ptr; class _LIBCPP_EXPORTED_FROM_ABI __thread_struct; @@ -118,7 +120,7 @@ struct _LIBCPP_TEMPLATE_VIS hash<__thread_id> : public __unary_function<__thread } }; -#ifndef _LIBCPP_HAS_NO_LOCALIZATION +# ifndef _LIBCPP_HAS_NO_LOCALIZATION template <class _CharT, class _Traits> _LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>& operator<<(basic_ostream<_CharT, _Traits>& __os, __thread_id __id) { @@ -143,7 +145,7 @@ operator<<(basic_ostream<_CharT, _Traits>& __os, __thread_id __id) { __sstr << __id.__id_; return __os << __sstr.str(); } -#endif // _LIBCPP_HAS_NO_LOCALIZATION +# endif // _LIBCPP_HAS_NO_LOCALIZATION class _LIBCPP_EXPORTED_FROM_ABI thread { __libcpp_thread_t __t_; @@ -156,13 +158,13 @@ public: typedef __libcpp_thread_t native_handle_type; _LIBCPP_HIDE_FROM_ABI thread() _NOEXCEPT : __t_(_LIBCPP_NULL_THREAD) {} -#ifndef _LIBCPP_CXX03_LANG +# ifndef _LIBCPP_CXX03_LANG template <class _Fp, class... _Args, __enable_if_t<!is_same<__remove_cvref_t<_Fp>, thread>::value, int> = 0> _LIBCPP_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS explicit thread(_Fp&& __f, _Args&&... __args); -#else // _LIBCPP_CXX03_LANG +# else // _LIBCPP_CXX03_LANG template <class _Fp> _LIBCPP_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS explicit thread(_Fp __f); -#endif +# endif ~thread(); _LIBCPP_HIDE_FROM_ABI thread(thread&& __t) _NOEXCEPT : __t_(__t.__t_) { __t.__t_ = _LIBCPP_NULL_THREAD; } @@ -186,7 +188,7 @@ public: static unsigned hardware_concurrency() _NOEXCEPT; }; -#ifndef _LIBCPP_CXX03_LANG +# ifndef _LIBCPP_CXX03_LANG template <class _TSp, class _Fp, class... _Args, size_t... _Indices> inline _LIBCPP_HIDE_FROM_ABI void __thread_execute(tuple<_TSp, _Fp, _Args...>& __t, __tuple_indices<_Indices...>) { @@ -216,7 +218,7 @@ thread::thread(_Fp&& __f, _Args&&... 
__args) { __throw_system_error(__ec, "thread constructor failed"); } -#else // _LIBCPP_CXX03_LANG +# else // _LIBCPP_CXX03_LANG template <class _Fp> struct __thread_invoke_pair { @@ -248,10 +250,12 @@ thread::thread(_Fp __f) { __throw_system_error(__ec, "thread constructor failed"); } -#endif // _LIBCPP_CXX03_LANG +# endif // _LIBCPP_CXX03_LANG inline _LIBCPP_HIDE_FROM_ABI void swap(thread& __x, thread& __y) _NOEXCEPT { __x.swap(__y); } +#endif // !defined(_LIBCPP_HAS_NO_THREADS) + _LIBCPP_END_NAMESPACE_STD _LIBCPP_POP_MACROS diff --git a/libcxx/include/__tree b/libcxx/include/__tree index 1990fa6..5a3e901 100644 --- a/libcxx/include/__tree +++ b/libcxx/include/__tree @@ -566,11 +566,18 @@ struct __tree_node_base_types { typedef __tree_end_node<__node_base_pointer> __end_node_type; typedef __rebind_pointer_t<_VoidPtr, __end_node_type> __end_node_pointer; -#if defined(_LIBCPP_ABI_TREE_REMOVE_NODE_POINTER_UB) typedef __end_node_pointer __parent_pointer; -#else - typedef __conditional_t< is_pointer<__end_node_pointer>::value, __end_node_pointer, __node_base_pointer> - __parent_pointer; + +// TODO(LLVM 22): Remove this check +#ifndef _LIBCPP_ABI_TREE_REMOVE_NODE_POINTER_UB + static_assert(sizeof(__node_base_pointer) == sizeof(__end_node_pointer) && _LIBCPP_ALIGNOF(__node_base_pointer) == + _LIBCPP_ALIGNOF(__end_node_pointer), + "It looks like you are using std::__tree (an implementation detail for (multi)map/set) with a fancy " + "pointer type that has a different representation depending on whether it points to a __tree base " + "pointer or a __tree node pointer (both of which are implementation details of the standard library). " + "This means that your ABI is being broken between LLVM 19 and LLVM 20. If you don't care about your " + "ABI being broken, define the _LIBCPP_ABI_TREE_REMOVE_NODE_POINTER_UB macro to silence this " + "diagnostic."); #endif private: @@ -605,12 +612,7 @@ public: typedef _Tp __node_value_type; typedef __rebind_pointer_t<_VoidPtr, __node_value_type> __node_value_type_pointer; typedef __rebind_pointer_t<_VoidPtr, const __node_value_type> __const_node_value_type_pointer; -#if defined(_LIBCPP_ABI_TREE_REMOVE_NODE_POINTER_UB) typedef typename __base::__end_node_pointer __iter_pointer; -#else - typedef __conditional_t< is_pointer<__node_pointer>::value, typename __base::__end_node_pointer, __node_pointer> - __iter_pointer; -#endif private: static_assert(!is_const<__node_type>::value, "_NodePtr should never be a pointer to const"); @@ -932,21 +934,21 @@ private: private: __iter_pointer __begin_node_; - __compressed_pair<__end_node_t, __node_allocator> __pair1_; - __compressed_pair<size_type, value_compare> __pair3_; + _LIBCPP_COMPRESSED_PAIR(__end_node_t, __end_node_, __node_allocator, __node_alloc_); + _LIBCPP_COMPRESSED_PAIR(size_type, __size_, value_compare, __value_comp_); public: _LIBCPP_HIDE_FROM_ABI __iter_pointer __end_node() _NOEXCEPT { - return static_cast<__iter_pointer>(pointer_traits<__end_node_ptr>::pointer_to(__pair1_.first())); + return static_cast<__iter_pointer>(pointer_traits<__end_node_ptr>::pointer_to(__end_node_)); } _LIBCPP_HIDE_FROM_ABI __iter_pointer __end_node() const _NOEXCEPT { return static_cast<__iter_pointer>( - pointer_traits<__end_node_ptr>::pointer_to(const_cast<__end_node_t&>(__pair1_.first()))); + pointer_traits<__end_node_ptr>::pointer_to(const_cast<__end_node_t&>(__end_node_))); } - _LIBCPP_HIDE_FROM_ABI __node_allocator& __node_alloc() _NOEXCEPT { return __pair1_.second(); } + _LIBCPP_HIDE_FROM_ABI __node_allocator& __node_alloc() 
_NOEXCEPT { return __node_alloc_; } private: - _LIBCPP_HIDE_FROM_ABI const __node_allocator& __node_alloc() const _NOEXCEPT { return __pair1_.second(); } + _LIBCPP_HIDE_FROM_ABI const __node_allocator& __node_alloc() const _NOEXCEPT { return __node_alloc_; } _LIBCPP_HIDE_FROM_ABI __iter_pointer& __begin_node() _NOEXCEPT { return __begin_node_; } _LIBCPP_HIDE_FROM_ABI const __iter_pointer& __begin_node() const _NOEXCEPT { return __begin_node_; } @@ -954,12 +956,12 @@ public: _LIBCPP_HIDE_FROM_ABI allocator_type __alloc() const _NOEXCEPT { return allocator_type(__node_alloc()); } private: - _LIBCPP_HIDE_FROM_ABI size_type& size() _NOEXCEPT { return __pair3_.first(); } + _LIBCPP_HIDE_FROM_ABI size_type& size() _NOEXCEPT { return __size_; } public: - _LIBCPP_HIDE_FROM_ABI const size_type& size() const _NOEXCEPT { return __pair3_.first(); } - _LIBCPP_HIDE_FROM_ABI value_compare& value_comp() _NOEXCEPT { return __pair3_.second(); } - _LIBCPP_HIDE_FROM_ABI const value_compare& value_comp() const _NOEXCEPT { return __pair3_.second(); } + _LIBCPP_HIDE_FROM_ABI const size_type& size() const _NOEXCEPT { return __size_; } + _LIBCPP_HIDE_FROM_ABI value_compare& value_comp() _NOEXCEPT { return __value_comp_; } + _LIBCPP_HIDE_FROM_ABI const value_compare& value_comp() const _NOEXCEPT { return __value_comp_; } public: _LIBCPP_HIDE_FROM_ABI __node_pointer __root() const _NOEXCEPT { @@ -1324,21 +1326,19 @@ private: template <class _Tp, class _Compare, class _Allocator> __tree<_Tp, _Compare, _Allocator>::__tree(const value_compare& __comp) _NOEXCEPT_( is_nothrow_default_constructible<__node_allocator>::value&& is_nothrow_copy_constructible<value_compare>::value) - : __pair3_(0, __comp) { + : __size_(0), __value_comp_(__comp) { __begin_node() = __end_node(); } template <class _Tp, class _Compare, class _Allocator> __tree<_Tp, _Compare, _Allocator>::__tree(const allocator_type& __a) - : __begin_node_(__iter_pointer()), - __pair1_(__default_init_tag(), __node_allocator(__a)), - __pair3_(0, __default_init_tag()) { + : __begin_node_(__iter_pointer()), __node_alloc_(__node_allocator(__a)), __size_(0) { __begin_node() = __end_node(); } template <class _Tp, class _Compare, class _Allocator> __tree<_Tp, _Compare, _Allocator>::__tree(const value_compare& __comp, const allocator_type& __a) - : __begin_node_(__iter_pointer()), __pair1_(__default_init_tag(), __node_allocator(__a)), __pair3_(0, __comp) { + : __begin_node_(__iter_pointer()), __node_alloc_(__node_allocator(__a)), __size_(0), __value_comp_(__comp) { __begin_node() = __end_node(); } @@ -1437,8 +1437,9 @@ void __tree<_Tp, _Compare, _Allocator>::__assign_multi(_InputIterator __first, _ template <class _Tp, class _Compare, class _Allocator> __tree<_Tp, _Compare, _Allocator>::__tree(const __tree& __t) : __begin_node_(__iter_pointer()), - __pair1_(__default_init_tag(), __node_traits::select_on_container_copy_construction(__t.__node_alloc())), - __pair3_(0, __t.value_comp()) { + __node_alloc_(__node_traits::select_on_container_copy_construction(__t.__node_alloc())), + __size_(0), + __value_comp_(__t.value_comp()) { __begin_node() = __end_node(); } @@ -1446,8 +1447,10 @@ template <class _Tp, class _Compare, class _Allocator> __tree<_Tp, _Compare, _Allocator>::__tree(__tree&& __t) _NOEXCEPT_( is_nothrow_move_constructible<__node_allocator>::value&& is_nothrow_move_constructible<value_compare>::value) : __begin_node_(std::move(__t.__begin_node_)), - __pair1_(std::move(__t.__pair1_)), - __pair3_(std::move(__t.__pair3_)) { + __end_node_(std::move(__t.__end_node_)), + 
__node_alloc_(std::move(__t.__node_alloc_)), + __size_(__t.__size_), + __value_comp_(std::move(__t.__value_comp_)) { if (size() == 0) __begin_node() = __end_node(); else { @@ -1460,7 +1463,7 @@ __tree<_Tp, _Compare, _Allocator>::__tree(__tree&& __t) _NOEXCEPT_( template <class _Tp, class _Compare, class _Allocator> __tree<_Tp, _Compare, _Allocator>::__tree(__tree&& __t, const allocator_type& __a) - : __pair1_(__default_init_tag(), __node_allocator(__a)), __pair3_(0, std::move(__t.value_comp())) { + : __node_alloc_(__node_allocator(__a)), __size_(0), __value_comp_(std::move(__t.value_comp())) { if (__a == __t.__alloc()) { if (__t.size() == 0) __begin_node() = __end_node(); @@ -1482,10 +1485,11 @@ template <class _Tp, class _Compare, class _Allocator> void __tree<_Tp, _Compare, _Allocator>::__move_assign(__tree& __t, true_type) _NOEXCEPT_(is_nothrow_move_assignable<value_compare>::value&& is_nothrow_move_assignable<__node_allocator>::value) { destroy(static_cast<__node_pointer>(__end_node()->__left_)); - __begin_node_ = __t.__begin_node_; - __pair1_.first() = __t.__pair1_.first(); + __begin_node_ = __t.__begin_node_; + __end_node_ = __t.__end_node_; __move_assign_alloc(__t); - __pair3_ = std::move(__t.__pair3_); + __size_ = __t.__size_; + __value_comp_ = std::move(__t.__value_comp_); if (size() == 0) __begin_node() = __end_node(); else { @@ -1554,9 +1558,10 @@ void __tree<_Tp, _Compare, _Allocator>::swap(__tree& __t) { using std::swap; swap(__begin_node_, __t.__begin_node_); - swap(__pair1_.first(), __t.__pair1_.first()); + swap(__end_node_, __t.__end_node_); std::__swap_allocator(__node_alloc(), __t.__node_alloc()); - __pair3_.swap(__t.__pair3_); + swap(__size_, __t.__size_); + swap(__value_comp_, __t.__value_comp_); if (size() == 0) __begin_node() = __end_node(); else diff --git a/libcxx/include/deque b/libcxx/include/deque index f2f6122..78e11fd 100644 --- a/libcxx/include/deque +++ b/libcxx/include/deque @@ -582,12 +582,12 @@ private: __map __map_; size_type __start_; - __compressed_pair<size_type, allocator_type> __size_; + _LIBCPP_COMPRESSED_PAIR(size_type, __size_, allocator_type, __alloc_); public: // construct/copy/destroy: _LIBCPP_HIDE_FROM_ABI deque() _NOEXCEPT_(is_nothrow_default_constructible<allocator_type>::value) - : __start_(0), __size_(0, __default_init_tag()) { + : __start_(0), __size_(0) { __annotate_new(0); } @@ -601,7 +601,7 @@ public: } _LIBCPP_HIDE_FROM_ABI explicit deque(const allocator_type& __a) - : __map_(__pointer_allocator(__a)), __start_(0), __size_(0, __a) { + : __map_(__pointer_allocator(__a)), __start_(0), __size_(0), __alloc_(__a) { __annotate_new(0); } @@ -613,7 +613,7 @@ public: template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI deque(size_type __n, const value_type& __v, const allocator_type& __a) - : __map_(__pointer_allocator(__a)), __start_(0), __size_(0, __a) { + : __map_(__pointer_allocator(__a)), __start_(0), __size_(0), __alloc_(__a) { __annotate_new(0); if (__n > 0) __append(__n, __v); @@ -627,7 +627,7 @@ public: #if _LIBCPP_STD_VER >= 23 template <_ContainerCompatibleRange<_Tp> _Range> _LIBCPP_HIDE_FROM_ABI deque(from_range_t, _Range&& __range, const allocator_type& __a = allocator_type()) - : __map_(__pointer_allocator(__a)), __start_(0), __size_(0, __a) { + : __map_(__pointer_allocator(__a)), __start_(0), __size_(0), __alloc_(__a) { if constexpr (ranges::forward_range<_Range> || ranges::sized_range<_Range>) { __append_with_size(ranges::begin(__range), ranges::distance(__range)); @@ -690,8 +690,8 @@ 
public: _LIBCPP_HIDE_FROM_ABI void assign(size_type __n, const value_type& __v); _LIBCPP_HIDE_FROM_ABI allocator_type get_allocator() const _NOEXCEPT; - _LIBCPP_HIDE_FROM_ABI allocator_type& __alloc() _NOEXCEPT { return __size_.second(); } - _LIBCPP_HIDE_FROM_ABI const allocator_type& __alloc() const _NOEXCEPT { return __size_.second(); } + _LIBCPP_HIDE_FROM_ABI allocator_type& __alloc() _NOEXCEPT { return __alloc_; } + _LIBCPP_HIDE_FROM_ABI const allocator_type& __alloc() const _NOEXCEPT { return __alloc_; } // iterators: @@ -730,8 +730,8 @@ public: // capacity: _LIBCPP_HIDE_FROM_ABI size_type size() const _NOEXCEPT { return __size(); } - _LIBCPP_HIDE_FROM_ABI size_type& __size() _NOEXCEPT { return __size_.first(); } - _LIBCPP_HIDE_FROM_ABI const size_type& __size() const _NOEXCEPT { return __size_.first(); } + _LIBCPP_HIDE_FROM_ABI size_type& __size() _NOEXCEPT { return __size_; } + _LIBCPP_HIDE_FROM_ABI const size_type& __size() const _NOEXCEPT { return __size_; } _LIBCPP_HIDE_FROM_ABI size_type max_size() const _NOEXCEPT { return std::min<size_type>(__alloc_traits::max_size(__alloc()), numeric_limits<difference_type>::max()); @@ -1251,7 +1251,7 @@ deque(from_range_t, _Range&&, _Alloc = _Alloc()) -> deque<ranges::range_value_t< #endif template <class _Tp, class _Allocator> -deque<_Tp, _Allocator>::deque(size_type __n) : __start_(0), __size_(0, __default_init_tag()) { +deque<_Tp, _Allocator>::deque(size_type __n) : __start_(0), __size_(0) { __annotate_new(0); if (__n > 0) __append(__n); @@ -1260,7 +1260,7 @@ deque<_Tp, _Allocator>::deque(size_type __n) : __start_(0), __size_(0, __default #if _LIBCPP_STD_VER >= 14 template <class _Tp, class _Allocator> deque<_Tp, _Allocator>::deque(size_type __n, const _Allocator& __a) - : __map_(__pointer_allocator(__a)), __start_(0), __size_(0, __a) { + : __map_(__pointer_allocator(__a)), __start_(0), __size_(0), __alloc_(__a) { __annotate_new(0); if (__n > 0) __append(__n); @@ -1268,7 +1268,7 @@ deque<_Tp, _Allocator>::deque(size_type __n, const _Allocator& __a) #endif template <class _Tp, class _Allocator> -deque<_Tp, _Allocator>::deque(size_type __n, const value_type& __v) : __start_(0), __size_(0, __default_init_tag()) { +deque<_Tp, _Allocator>::deque(size_type __n, const value_type& __v) : __start_(0), __size_(0) { __annotate_new(0); if (__n > 0) __append(__n, __v); @@ -1276,7 +1276,7 @@ deque<_Tp, _Allocator>::deque(size_type __n, const value_type& __v) : __start_(0 template <class _Tp, class _Allocator> template <class _InputIter, __enable_if_t<__has_input_iterator_category<_InputIter>::value, int> > -deque<_Tp, _Allocator>::deque(_InputIter __f, _InputIter __l) : __start_(0), __size_(0, __default_init_tag()) { +deque<_Tp, _Allocator>::deque(_InputIter __f, _InputIter __l) : __start_(0), __size_(0) { __annotate_new(0); __append(__f, __l); } @@ -1284,7 +1284,7 @@ deque<_Tp, _Allocator>::deque(_InputIter __f, _InputIter __l) : __start_(0), __s template <class _Tp, class _Allocator> template <class _InputIter, __enable_if_t<__has_input_iterator_category<_InputIter>::value, int> > deque<_Tp, _Allocator>::deque(_InputIter __f, _InputIter __l, const allocator_type& __a) - : __map_(__pointer_allocator(__a)), __start_(0), __size_(0, __a) { + : __map_(__pointer_allocator(__a)), __start_(0), __size_(0), __alloc_(__a) { __annotate_new(0); __append(__f, __l); } @@ -1293,14 +1293,15 @@ template <class _Tp, class _Allocator> deque<_Tp, _Allocator>::deque(const deque& __c) : 
__map_(__pointer_allocator(__alloc_traits::select_on_container_copy_construction(__c.__alloc()))), __start_(0), - __size_(0, __map_.__alloc()) { + __size_(0), + __alloc_(__map_.__alloc()) { __annotate_new(0); __append(__c.begin(), __c.end()); } template <class _Tp, class _Allocator> deque<_Tp, _Allocator>::deque(const deque& __c, const __type_identity_t<allocator_type>& __a) - : __map_(__pointer_allocator(__a)), __start_(0), __size_(0, __a) { + : __map_(__pointer_allocator(__a)), __start_(0), __size_(0), __alloc_(__a) { __annotate_new(0); __append(__c.begin(), __c.end()); } @@ -1317,21 +1318,24 @@ deque<_Tp, _Allocator>& deque<_Tp, _Allocator>::operator=(const deque& __c) { #ifndef _LIBCPP_CXX03_LANG template <class _Tp, class _Allocator> -deque<_Tp, _Allocator>::deque(initializer_list<value_type> __il) : __start_(0), __size_(0, __default_init_tag()) { +deque<_Tp, _Allocator>::deque(initializer_list<value_type> __il) : __start_(0), __size_(0) { __annotate_new(0); __append(__il.begin(), __il.end()); } template <class _Tp, class _Allocator> deque<_Tp, _Allocator>::deque(initializer_list<value_type> __il, const allocator_type& __a) - : __map_(__pointer_allocator(__a)), __start_(0), __size_(0, __a) { + : __map_(__pointer_allocator(__a)), __start_(0), __size_(0), __alloc_(__a) { __annotate_new(0); __append(__il.begin(), __il.end()); } template <class _Tp, class _Allocator> inline deque<_Tp, _Allocator>::deque(deque&& __c) noexcept(is_nothrow_move_constructible<allocator_type>::value) - : __map_(std::move(__c.__map_)), __start_(std::move(__c.__start_)), __size_(std::move(__c.__size_)) { + : __map_(std::move(__c.__map_)), + __start_(std::move(__c.__start_)), + __size_(std::move(__c.__size_)), + __alloc_(std::move(__c.__alloc_)) { __c.__start_ = 0; __c.__size() = 0; } @@ -1340,7 +1344,8 @@ template <class _Tp, class _Allocator> inline deque<_Tp, _Allocator>::deque(deque&& __c, const __type_identity_t<allocator_type>& __a) : __map_(std::move(__c.__map_), __pointer_allocator(__a)), __start_(std::move(__c.__start_)), - __size_(std::move(__c.__size()), __a) { + __size_(std::move(__c.__size_)), + __alloc_(__a) { if (__a == __c.__alloc()) { __c.__start_ = 0; __c.__size() = 0; diff --git a/libcxx/include/forward_list b/libcxx/include/forward_list index 9a80413..c5ae8ad 100644 --- a/libcxx/include/forward_list +++ b/libcxx/include/forward_list @@ -229,6 +229,7 @@ template <class T, class Allocator, class Predicate> #include <__type_traits/type_identity.h> #include <__utility/forward.h> #include <__utility/move.h> +#include <__utility/swap.h> #include <limits> #include <new> // __launder #include <version> @@ -277,18 +278,20 @@ struct __forward_node_traits { typedef __rebind_pointer_t<_NodePtr, __begin_node> __begin_node_pointer; typedef __rebind_pointer_t<_NodePtr, void> __void_pointer; -#if defined(_LIBCPP_ABI_FORWARD_LIST_REMOVE_NODE_POINTER_UB) - typedef __begin_node_pointer __iter_node_pointer; -#else - typedef __conditional_t<is_pointer<__void_pointer>::value, __begin_node_pointer, __node_pointer> __iter_node_pointer; +// TODO(LLVM 22): Remove this check +#ifndef _LIBCPP_ABI_FORWARD_LIST_REMOVE_NODE_POINTER_UB + static_assert(sizeof(__begin_node_pointer) == sizeof(__node_pointer) && _LIBCPP_ALIGNOF(__begin_node_pointer) == + _LIBCPP_ALIGNOF(__node_pointer), + "It looks like you are using std::forward_list with a fancy pointer type that has a different " + "representation depending on whether it points to a forward_list base pointer or a forward_list node " + "pointer (both of which are 
implementation details of the standard library). This means that your ABI " + "is being broken between LLVM 19 and LLVM 20. If you don't care about your ABI being broken, define " + "the _LIBCPP_ABI_FORWARD_LIST_REMOVE_NODE_POINTER_UB macro to silence this diagnostic."); #endif - typedef __conditional_t<is_same<__iter_node_pointer, __node_pointer>::value, __begin_node_pointer, __node_pointer> - __non_iter_node_pointer; - - _LIBCPP_HIDE_FROM_ABI static __iter_node_pointer __as_iter_node(__iter_node_pointer __p) { return __p; } - _LIBCPP_HIDE_FROM_ABI static __iter_node_pointer __as_iter_node(__non_iter_node_pointer __p) { - return static_cast<__iter_node_pointer>(static_cast<__void_pointer>(__p)); + _LIBCPP_HIDE_FROM_ABI static __begin_node_pointer __as_iter_node(__begin_node_pointer __p) { return __p; } + _LIBCPP_HIDE_FROM_ABI static __begin_node_pointer __as_iter_node(__node_pointer __p) { + return static_cast<__begin_node_pointer>(static_cast<__void_pointer>(__p)); } }; @@ -350,10 +353,9 @@ class _LIBCPP_TEMPLATE_VIS __forward_list_iterator { typedef __forward_node_traits<_NodePtr> __traits; typedef typename __traits::__node_pointer __node_pointer; typedef typename __traits::__begin_node_pointer __begin_node_pointer; - typedef typename __traits::__iter_node_pointer __iter_node_pointer; typedef typename __traits::__void_pointer __void_pointer; - __iter_node_pointer __ptr_; + __begin_node_pointer __ptr_; _LIBCPP_HIDE_FROM_ABI __begin_node_pointer __get_begin() const { return static_cast<__begin_node_pointer>(static_cast<__void_pointer>(__ptr_)); @@ -416,10 +418,9 @@ class _LIBCPP_TEMPLATE_VIS __forward_list_const_iterator { typedef typename __traits::__node_type __node_type; typedef typename __traits::__node_pointer __node_pointer; typedef typename __traits::__begin_node_pointer __begin_node_pointer; - typedef typename __traits::__iter_node_pointer __iter_node_pointer; typedef typename __traits::__void_pointer __void_pointer; - __iter_node_pointer __ptr_; + __begin_node_pointer __ptr_; _LIBCPP_HIDE_FROM_ABI __begin_node_pointer __get_begin() const { return static_cast<__begin_node_pointer>(static_cast<__void_pointer>(__ptr_)); @@ -491,27 +492,27 @@ protected: typedef __rebind_alloc<allocator_traits<allocator_type>, __begin_node> __begin_node_allocator; typedef typename allocator_traits<__begin_node_allocator>::pointer __begin_node_pointer; - __compressed_pair<__begin_node, __node_allocator> __before_begin_; + _LIBCPP_COMPRESSED_PAIR(__begin_node, __before_begin_, __node_allocator, __alloc_); _LIBCPP_HIDE_FROM_ABI __begin_node_pointer __before_begin() _NOEXCEPT { - return pointer_traits<__begin_node_pointer>::pointer_to(__before_begin_.first()); + return pointer_traits<__begin_node_pointer>::pointer_to(__before_begin_); } _LIBCPP_HIDE_FROM_ABI __begin_node_pointer __before_begin() const _NOEXCEPT { - return pointer_traits<__begin_node_pointer>::pointer_to(const_cast<__begin_node&>(__before_begin_.first())); + return pointer_traits<__begin_node_pointer>::pointer_to(const_cast<__begin_node&>(__before_begin_)); } - _LIBCPP_HIDE_FROM_ABI __node_allocator& __alloc() _NOEXCEPT { return __before_begin_.second(); } - _LIBCPP_HIDE_FROM_ABI const __node_allocator& __alloc() const _NOEXCEPT { return __before_begin_.second(); } + _LIBCPP_HIDE_FROM_ABI __node_allocator& __alloc() _NOEXCEPT { return __alloc_; } + _LIBCPP_HIDE_FROM_ABI const __node_allocator& __alloc() const _NOEXCEPT { return __alloc_; } typedef __forward_list_iterator<__node_pointer> iterator; typedef 
__forward_list_const_iterator<__node_pointer> const_iterator; _LIBCPP_HIDE_FROM_ABI __forward_list_base() _NOEXCEPT_(is_nothrow_default_constructible<__node_allocator>::value) - : __before_begin_(__begin_node(), __default_init_tag()) {} + : __before_begin_(__begin_node()) {} _LIBCPP_HIDE_FROM_ABI explicit __forward_list_base(const allocator_type& __a) - : __before_begin_(__begin_node(), __node_allocator(__a)) {} + : __before_begin_(__begin_node()), __alloc_(__node_allocator(__a)) {} _LIBCPP_HIDE_FROM_ABI explicit __forward_list_base(const __node_allocator& __a) - : __before_begin_(__begin_node(), __a) {} + : __before_begin_(__begin_node()), __alloc_(__a) {} public: #ifndef _LIBCPP_CXX03_LANG @@ -593,13 +594,13 @@ private: template <class _Tp, class _Alloc> inline __forward_list_base<_Tp, _Alloc>::__forward_list_base(__forward_list_base&& __x) noexcept( is_nothrow_move_constructible<__node_allocator>::value) - : __before_begin_(std::move(__x.__before_begin_)) { + : __before_begin_(std::move(__x.__before_begin_)), __alloc_(std::move(__x.__alloc_)) { __x.__before_begin()->__next_ = nullptr; } template <class _Tp, class _Alloc> inline __forward_list_base<_Tp, _Alloc>::__forward_list_base(__forward_list_base&& __x, const allocator_type& __a) - : __before_begin_(__begin_node(), __node_allocator(__a)) { + : __before_begin_(__begin_node()), __alloc_(__node_allocator(__a)) { if (__alloc() == __x.__alloc()) { __before_begin()->__next_ = __x.__before_begin()->__next_; __x.__before_begin()->__next_ = nullptr; diff --git a/libcxx/include/fstream b/libcxx/include/fstream index a77b7ce..1cc3bd88 100644 --- a/libcxx/include/fstream +++ b/libcxx/include/fstream @@ -215,7 +215,7 @@ _LIBCPP_PUSH_MACROS # define _LIBCPP_HAS_NO_OFF_T_FUNCTIONS #endif -#if !defined(_LIBCPP_HAS_NO_FILESYSTEM) +#if !defined(_LIBCPP_HAS_NO_FILESYSTEM) && !defined(_LIBCPP_HAS_NO_LOCALIZATION) _LIBCPP_BEGIN_NAMESPACE_STD @@ -1560,7 +1560,7 @@ extern template class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_filebuf<char>; _LIBCPP_END_NAMESPACE_STD -#endif // _LIBCPP_HAS_NO_FILESYSTEM +#endif // !defined(_LIBCPP_HAS_NO_FILESYSTEM) && !defined(_LIBCPP_HAS_NO_LOCALIZATION) _LIBCPP_POP_MACROS diff --git a/libcxx/include/future b/libcxx/include/future index 8eadbcb..e14622e 100644 --- a/libcxx/include/future +++ b/libcxx/include/future @@ -1400,13 +1400,13 @@ class __packaged_task_func; template <class _Fp, class _Alloc, class _Rp, class... 
_ArgTypes> class __packaged_task_func<_Fp, _Alloc, _Rp(_ArgTypes...)> : public __packaged_task_base<_Rp(_ArgTypes...)> { - __compressed_pair<_Fp, _Alloc> __f_; + _LIBCPP_COMPRESSED_PAIR(_Fp, __func_, _Alloc, __alloc_); public: - _LIBCPP_HIDE_FROM_ABI explicit __packaged_task_func(const _Fp& __f) : __f_(__f, __default_init_tag()) {} - _LIBCPP_HIDE_FROM_ABI explicit __packaged_task_func(_Fp&& __f) : __f_(std::move(__f), __default_init_tag()) {} - _LIBCPP_HIDE_FROM_ABI __packaged_task_func(const _Fp& __f, const _Alloc& __a) : __f_(__f, __a) {} - _LIBCPP_HIDE_FROM_ABI __packaged_task_func(_Fp&& __f, const _Alloc& __a) : __f_(std::move(__f), __a) {} + _LIBCPP_HIDE_FROM_ABI explicit __packaged_task_func(const _Fp& __f) : __func_(__f) {} + _LIBCPP_HIDE_FROM_ABI explicit __packaged_task_func(_Fp&& __f) : __func_(std::move(__f)) {} + _LIBCPP_HIDE_FROM_ABI __packaged_task_func(const _Fp& __f, const _Alloc& __a) : __func_(__f), __alloc_(__a) {} + _LIBCPP_HIDE_FROM_ABI __packaged_task_func(_Fp&& __f, const _Alloc& __a) : __func_(std::move(__f)), __alloc_(__a) {} _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual void __move_to(__packaged_task_base<_Rp(_ArgTypes...)>*) _NOEXCEPT; _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual void destroy(); _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual void destroy_deallocate(); @@ -1416,12 +1416,13 @@ public: template <class _Fp, class _Alloc, class _Rp, class... _ArgTypes> void __packaged_task_func<_Fp, _Alloc, _Rp(_ArgTypes...)>::__move_to( __packaged_task_base<_Rp(_ArgTypes...)>* __p) _NOEXCEPT { - ::new ((void*)__p) __packaged_task_func(std::move(__f_.first()), std::move(__f_.second())); + ::new ((void*)__p) __packaged_task_func(std::move(__func_), std::move(__alloc_)); } template <class _Fp, class _Alloc, class _Rp, class... _ArgTypes> void __packaged_task_func<_Fp, _Alloc, _Rp(_ArgTypes...)>::destroy() { - __f_.~__compressed_pair<_Fp, _Alloc>(); + __func_.~_Fp(); + __alloc_.~_Alloc(); } template <class _Fp, class _Alloc, class _Rp, class... _ArgTypes> @@ -1429,14 +1430,15 @@ void __packaged_task_func<_Fp, _Alloc, _Rp(_ArgTypes...)>::destroy_deallocate() typedef typename __allocator_traits_rebind<_Alloc, __packaged_task_func>::type _Ap; typedef allocator_traits<_Ap> _ATraits; typedef pointer_traits<typename _ATraits::pointer> _PTraits; - _Ap __a(__f_.second()); - __f_.~__compressed_pair<_Fp, _Alloc>(); + _Ap __a(__alloc_); + __func_.~_Fp(); + __alloc_.~_Alloc(); __a.deallocate(_PTraits::pointer_to(*this), 1); } template <class _Fp, class _Alloc, class _Rp, class... _ArgTypes> _Rp __packaged_task_func<_Fp, _Alloc, _Rp(_ArgTypes...)>::operator()(_ArgTypes&&... 
__arg) { - return std::__invoke(__f_.first(), std::forward<_ArgTypes>(__arg)...); + return std::__invoke(__func_, std::forward<_ArgTypes>(__arg)...); } template <class _Callable> diff --git a/libcxx/include/iomanip b/libcxx/include/iomanip index fb4f15b..70c8c35 100644 --- a/libcxx/include/iomanip +++ b/libcxx/include/iomanip @@ -43,12 +43,15 @@ template <class charT, class traits, class Allocator> */ #include <__config> -#include <istream> -#include <version> -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif +#ifndef _LIBCPP_HAS_NO_LOCALIZATION + +# include <istream> +# include <version> + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif _LIBCPP_BEGIN_NAMESPACE_STD @@ -231,9 +234,9 @@ public: template <class _CharT, class _Traits, class _MoneyT> _LIBCPP_HIDE_FROM_ABI basic_istream<_CharT, _Traits>& operator>>(basic_istream<_CharT, _Traits>& __is, const __iom_t7<_MoneyT>& __x) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS typename basic_istream<_CharT, _Traits>::sentry __s(__is); if (__s) { typedef istreambuf_iterator<_CharT, _Traits> _Ip; @@ -243,11 +246,11 @@ operator>>(basic_istream<_CharT, _Traits>& __is, const __iom_t7<_MoneyT>& __x) { __mf.get(_Ip(__is), _Ip(), __x.__intl_, __is, __err, __x.__mon_); __is.setstate(__err); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __is.__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return __is; } @@ -280,9 +283,9 @@ public: template <class _CharT, class _Traits, class _MoneyT> _LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>& operator<<(basic_ostream<_CharT, _Traits>& __os, const __iom_t8<_MoneyT>& __x) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS typename basic_ostream<_CharT, _Traits>::sentry __s(__os); if (__s) { typedef ostreambuf_iterator<_CharT, _Traits> _Op; @@ -291,11 +294,11 @@ operator<<(basic_ostream<_CharT, _Traits>& __os, const __iom_t8<_MoneyT>& __x) { if (__mf.put(_Op(__os), __x.__intl_, __os, __os.fill(), __x.__mon_).failed()) __os.setstate(ios_base::badbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __os.__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return __os; } @@ -328,9 +331,9 @@ public: template <class _CharT, class _Traits> _LIBCPP_HIDE_FROM_ABI basic_istream<_CharT, _Traits>& operator>>(basic_istream<_CharT, _Traits>& __is, const __iom_t9<_CharT>& __x) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS typename basic_istream<_CharT, _Traits>::sentry __s(__is); if (__s) { typedef istreambuf_iterator<_CharT, _Traits> _Ip; @@ -340,11 +343,11 @@ operator>>(basic_istream<_CharT, _Traits>& __is, const __iom_t9<_CharT>& __x) { __tf.get(_Ip(__is), _Ip(), __is, __err, __x.__tm_, __x.__fmt_, __x.__fmt_ + _Traits::length(__x.__fmt_)); __is.setstate(__err); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) 
{ __is.__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return __is; } @@ -377,9 +380,9 @@ public: template <class _CharT, class _Traits> _LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>& operator<<(basic_ostream<_CharT, _Traits>& __os, const __iom_t10<_CharT>& __x) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS typename basic_ostream<_CharT, _Traits>::sentry __s(__os); if (__s) { typedef ostreambuf_iterator<_CharT, _Traits> _Op; @@ -389,11 +392,11 @@ operator<<(basic_ostream<_CharT, _Traits>& __os, const __iom_t10<_CharT>& __x) { .failed()) __os.setstate(ios_base::badbit); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __os.__set_badbit_and_consider_rethrow(); } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS return __os; } @@ -505,7 +508,7 @@ __quoted(basic_string<_CharT, _Traits, _Allocator>& __s, _CharT __delim = _CharT return __quoted_proxy<_CharT, _Traits, _Allocator>(__s, __delim, __escape); } -#if _LIBCPP_STD_VER >= 14 +# if _LIBCPP_STD_VER >= 14 template <class _CharT> _LIBCPP_HIDE_FROM_ABI auto quoted(const _CharT* __s, _CharT __delim = _CharT('"'), _CharT __escape = _CharT('\\')) { @@ -535,8 +538,10 @@ quoted(basic_string_view<_CharT, _Traits> __sv, _CharT __delim = _CharT('"'), _C return __quoted_output_proxy<_CharT, _Traits>(__sv.data(), __sv.data() + __sv.size(), __delim, __escape); } -#endif // _LIBCPP_STD_VER >= 14 +# endif // _LIBCPP_STD_VER >= 14 _LIBCPP_END_NAMESPACE_STD +#endif // !_LIBCPP_HAS_NO_LOCALIZATION + #endif // _LIBCPP_IOMANIP diff --git a/libcxx/include/istream b/libcxx/include/istream index 7c65a24..8ee29ba 100644 --- a/libcxx/include/istream +++ b/libcxx/include/istream @@ -159,26 +159,29 @@ template <class Stream, class T> */ #include <__config> -#include <__fwd/istream.h> -#include <__iterator/istreambuf_iterator.h> -#include <__ostream/basic_ostream.h> -#include <__type_traits/conjunction.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_base_of.h> -#include <__type_traits/make_unsigned.h> -#include <__utility/declval.h> -#include <__utility/forward.h> -#include <bitset> -#include <ios> -#include <locale> -#include <version> - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif + +#if !defined(_LIBCPP_HAS_NO_LOCALIZATION) + +# include <__fwd/istream.h> +# include <__iterator/istreambuf_iterator.h> +# include <__ostream/basic_ostream.h> +# include <__type_traits/conjunction.h> +# include <__type_traits/enable_if.h> +# include <__type_traits/is_base_of.h> +# include <__type_traits/make_unsigned.h> +# include <__utility/declval.h> +# include <__utility/forward.h> +# include <bitset> +# include <ios> +# include <locale> +# include <version> + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif _LIBCPP_PUSH_MACROS -#include <__undef_macros> +# include <__undef_macros> _LIBCPP_BEGIN_NAMESPACE_STD @@ -354,13 +357,13 @@ __input_arithmetic(basic_istream<_CharT, _Traits>& __is, _Tp& __n) { ios_base::iostate __state = ios_base::goodbit; typename basic_istream<_CharT, _Traits>::sentry __s(__is); if (__s) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS typedef istreambuf_iterator<_CharT, _Traits> _Ip; typedef num_get<_CharT, _Ip> 
_Fp; std::use_facet<_Fp>(__is.getloc()).get(_Ip(__is), _Ip(), __is, __state, __n); -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __state |= ios_base::badbit; __is.__setstate_nothrow(__state); @@ -368,7 +371,7 @@ __input_arithmetic(basic_istream<_CharT, _Traits>& __is, _Tp& __n) { throw; } } -#endif +# endif __is.setstate(__state); } return __is; @@ -435,9 +438,9 @@ __input_arithmetic_with_numeric_limits(basic_istream<_CharT, _Traits>& __is, _Tp ios_base::iostate __state = ios_base::goodbit; typename basic_istream<_CharT, _Traits>::sentry __s(__is); if (__s) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS typedef istreambuf_iterator<_CharT, _Traits> _Ip; typedef num_get<_CharT, _Ip> _Fp; long __temp; @@ -451,7 +454,7 @@ __input_arithmetic_with_numeric_limits(basic_istream<_CharT, _Traits>& __is, _Tp } else { __n = static_cast<_Tp>(__temp); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __state |= ios_base::badbit; __is.__setstate_nothrow(__state); @@ -459,7 +462,7 @@ __input_arithmetic_with_numeric_limits(basic_istream<_CharT, _Traits>& __is, _Tp throw; } } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS __is.setstate(__state); } return __is; @@ -481,9 +484,9 @@ __input_c_string(basic_istream<_CharT, _Traits>& __is, _CharT* __p, size_t __n) ios_base::iostate __state = ios_base::goodbit; typename basic_istream<_CharT, _Traits>::sentry __sen(__is); if (__sen) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif +# endif _CharT* __s = __p; const ctype<_CharT>& __ct = std::use_facet<ctype<_CharT> >(__is.getloc()); while (__s != __p + (__n - 1)) { @@ -502,7 +505,7 @@ __input_c_string(basic_istream<_CharT, _Traits>& __is, _CharT* __p, size_t __n) __is.width(0); if (__s == __p) __state |= ios_base::failbit; -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __state |= ios_base::badbit; __is.__setstate_nothrow(__state); @@ -510,13 +513,13 @@ __input_c_string(basic_istream<_CharT, _Traits>& __is, _CharT* __p, size_t __n) throw; } } -#endif +# endif __is.setstate(__state); } return __is; } -#if _LIBCPP_STD_VER >= 20 +# if _LIBCPP_STD_VER >= 20 template <class _CharT, class _Traits, size_t _Np> inline _LIBCPP_HIDE_FROM_ABI basic_istream<_CharT, _Traits>& @@ -539,7 +542,7 @@ operator>>(basic_istream<char, _Traits>& __is, signed char (&__buf)[_Np]) { return __is >> (char(&)[_Np])__buf; } -#else +# else template <class _CharT, class _Traits> inline _LIBCPP_HIDE_FROM_ABI basic_istream<_CharT, _Traits>& @@ -562,22 +565,22 @@ operator>>(basic_istream<char, _Traits>& __is, signed char* __s) { return __is >> (char*)__s; } -#endif // _LIBCPP_STD_VER >= 20 +# endif // _LIBCPP_STD_VER >= 20 template <class _CharT, class _Traits> _LIBCPP_HIDE_FROM_ABI basic_istream<_CharT, _Traits>& operator>>(basic_istream<_CharT, _Traits>& __is, _CharT& __c) { ios_base::iostate __state = ios_base::goodbit; typename basic_istream<_CharT, _Traits>::sentry __sen(__is); if (__sen) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif +# endif typename _Traits::int_type __i = __is.rdbuf()->sbumpc(); if (_Traits::eq_int_type(__i, _Traits::eof())) __state |= ios_base::eofbit | ios_base::failbit; else __c = _Traits::to_char_type(__i); -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) 
{ __state |= ios_base::badbit; __is.__setstate_nothrow(__state); @@ -585,7 +588,7 @@ _LIBCPP_HIDE_FROM_ABI basic_istream<_CharT, _Traits>& operator>>(basic_istream<_ throw; } } -#endif +# endif __is.setstate(__state); } return __is; @@ -611,9 +614,9 @@ basic_istream<_CharT, _Traits>::operator>>(basic_streambuf<char_type, traits_typ sentry __s(*this, true); if (__s) { if (__sb) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS while (true) { typename traits_type::int_type __i = this->rdbuf()->sgetc(); if (traits_type::eq_int_type(__i, _Traits::eof())) { @@ -627,7 +630,7 @@ basic_istream<_CharT, _Traits>::operator>>(basic_streambuf<char_type, traits_typ } if (__gc_ == 0) __state |= ios_base::failbit; -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __state |= ios_base::badbit; if (__gc_ == 0) @@ -638,7 +641,7 @@ basic_istream<_CharT, _Traits>::operator>>(basic_streambuf<char_type, traits_typ throw; } } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS } else { __state |= ios_base::failbit; } @@ -654,22 +657,22 @@ typename basic_istream<_CharT, _Traits>::int_type basic_istream<_CharT, _Traits> int_type __r = traits_type::eof(); sentry __s(*this, true); if (__s) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif +# endif __r = this->rdbuf()->sbumpc(); if (traits_type::eq_int_type(__r, traits_type::eof())) __state |= ios_base::failbit | ios_base::eofbit; else __gc_ = 1; -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__setstate_nothrow(this->rdstate() | ios_base::badbit); if (this->exceptions() & ios_base::badbit) { throw; } } -#endif +# endif this->setstate(__state); } return __r; @@ -682,9 +685,9 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::get(char_type* _ sentry __sen(*this, true); if (__sen) { if (__n > 0) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif +# endif while (__gc_ < __n - 1) { int_type __i = this->rdbuf()->sgetc(); if (traits_type::eq_int_type(__i, traits_type::eof())) { @@ -700,7 +703,7 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::get(char_type* _ } if (__gc_ == 0) __state |= ios_base::failbit; -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __state |= ios_base::badbit; this->__setstate_nothrow(__state); @@ -710,7 +713,7 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::get(char_type* _ throw; } } -#endif +# endif } else { __state |= ios_base::failbit; } @@ -731,9 +734,9 @@ basic_istream<_CharT, _Traits>::get(basic_streambuf<char_type, traits_type>& __s __gc_ = 0; sentry __sen(*this, true); if (__sen) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS while (true) { typename traits_type::int_type __i = this->rdbuf()->sgetc(); if (traits_type::eq_int_type(__i, traits_type::eof())) { @@ -748,12 +751,12 @@ basic_istream<_CharT, _Traits>::get(basic_streambuf<char_type, traits_type>& __s __inc_gcount(); this->rdbuf()->sbumpc(); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) 
{ __state |= ios_base::badbit; // according to the spec, exceptions here are caught but not rethrown } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS if (__gc_ == 0) __state |= ios_base::failbit; this->setstate(__state); @@ -768,9 +771,9 @@ basic_istream<_CharT, _Traits>::getline(char_type* __s, streamsize __n, char_typ __gc_ = 0; sentry __sen(*this, true); if (__sen) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS while (true) { typename traits_type::int_type __i = this->rdbuf()->sgetc(); if (traits_type::eq_int_type(__i, traits_type::eof())) { @@ -791,7 +794,7 @@ basic_istream<_CharT, _Traits>::getline(char_type* __s, streamsize __n, char_typ this->rdbuf()->sbumpc(); __inc_gcount(); } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __state |= ios_base::badbit; this->__setstate_nothrow(__state); @@ -803,7 +806,7 @@ basic_istream<_CharT, _Traits>::getline(char_type* __s, streamsize __n, char_typ throw; } } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS } if (__n > 0) *__s = char_type(); @@ -819,9 +822,9 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::ignore(streamsiz __gc_ = 0; sentry __sen(*this, true); if (__sen) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS if (__n == numeric_limits<streamsize>::max()) { while (true) { typename traits_type::int_type __i = this->rdbuf()->sbumpc(); @@ -845,7 +848,7 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::ignore(streamsiz break; } } -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __state |= ios_base::badbit; this->__setstate_nothrow(__state); @@ -853,7 +856,7 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::ignore(streamsiz throw; } } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS this->setstate(__state); } return *this; @@ -866,13 +869,13 @@ typename basic_istream<_CharT, _Traits>::int_type basic_istream<_CharT, _Traits> int_type __r = traits_type::eof(); sentry __sen(*this, true); if (__sen) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS __r = this->rdbuf()->sgetc(); if (traits_type::eq_int_type(__r, traits_type::eof())) __state |= ios_base::eofbit; -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { __state |= ios_base::badbit; this->__setstate_nothrow(__state); @@ -880,7 +883,7 @@ typename basic_istream<_CharT, _Traits>::int_type basic_istream<_CharT, _Traits> throw; } } -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS this->setstate(__state); } return __r; @@ -892,13 +895,13 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::read(char_type* __gc_ = 0; sentry __sen(*this, true); if (__sen) { -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -#endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS __gc_ = this->rdbuf()->sgetn(__s, __n); if (__gc_ != __n) __state |= ios_base::failbit | ios_base::eofbit; -#ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) 
{
      __state |= ios_base::badbit;
      this->__setstate_nothrow(__state);
@@ -906,7 +909,7 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::read(char_type*
        throw;
      }
    }
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
  } else {
    __state |= ios_base::failbit;
  }
@@ -920,9 +923,9 @@ streamsize basic_istream<_CharT, _Traits>::readsome(char_type* __s, streamsize _
  __gc_ = 0;
  sentry __sen(*this, true);
  if (__sen) {
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    try {
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
      streamsize __c = this->rdbuf()->in_avail();
      switch (__c) {
      case -1:
@@ -937,7 +940,7 @@ streamsize basic_istream<_CharT, _Traits>::readsome(char_type* __s, streamsize _
        __state |= ios_base::failbit | ios_base::eofbit;
        break;
      }
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    } catch (...) {
      __state |= ios_base::badbit;
      this->__setstate_nothrow(__state);
@@ -945,7 +948,7 @@ streamsize basic_istream<_CharT, _Traits>::readsome(char_type* __s, streamsize _
        throw;
      }
    }
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
  } else {
    __state |= ios_base::failbit;
  }
@@ -960,12 +963,12 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::putback(char_typ
  this->clear(__state);
  sentry __sen(*this, true);
  if (__sen) {
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    try {
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
      if (this->rdbuf() == nullptr || this->rdbuf()->sputbackc(__c) == traits_type::eof())
        __state |= ios_base::badbit;
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    } catch (...) {
      __state |= ios_base::badbit;
      this->__setstate_nothrow(__state);
@@ -973,7 +976,7 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::putback(char_typ
        throw;
      }
    }
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
  } else {
    __state |= ios_base::failbit;
  }
@@ -988,12 +991,12 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::unget() {
  this->clear(__state);
  sentry __sen(*this, true);
  if (__sen) {
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    try {
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
      if (this->rdbuf() == nullptr || this->rdbuf()->sungetc() == traits_type::eof())
        __state |= ios_base::badbit;
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    } catch (...) {
      __state |= ios_base::badbit;
      this->__setstate_nothrow(__state);
@@ -1001,7 +1004,7 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::unget() {
        throw;
      }
    }
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
  } else {
    __state |= ios_base::failbit;
  }
@@ -1018,14 +1021,14 @@ int basic_istream<_CharT, _Traits>::sync() {
  int __r = 0;
  if (__sen) {
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    try {
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
      if (this->rdbuf()->pubsync() == -1) {
        __state |= ios_base::badbit;
        __r = -1;
      }
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    } catch (...) {
      __state |= ios_base::badbit;
      this->__setstate_nothrow(__state);
@@ -1033,7 +1036,7 @@ int basic_istream<_CharT, _Traits>::sync() {
        throw;
      }
    }
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
    this->setstate(__state);
  }
  return __r;
@@ -1045,11 +1048,11 @@ typename basic_istream<_CharT, _Traits>::pos_type basic_istream<_CharT, _Traits>
  pos_type __r(-1);
  sentry __sen(*this, true);
  if (__sen) {
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    try {
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
      __r = this->rdbuf()->pubseekoff(0, ios_base::cur, ios_base::in);
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    } catch (...) {
      __state |= ios_base::badbit;
      this->__setstate_nothrow(__state);
@@ -1057,7 +1060,7 @@ typename basic_istream<_CharT, _Traits>::pos_type basic_istream<_CharT, _Traits>
        throw;
      }
    }
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
    this->setstate(__state);
  }
  return __r;
@@ -1069,12 +1072,12 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::seekg(pos_type _
  this->clear(__state);
  sentry __sen(*this, true);
  if (__sen) {
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    try {
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
      if (this->rdbuf()->pubseekpos(__pos, ios_base::in) == pos_type(-1))
        __state |= ios_base::failbit;
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    } catch (...) {
      __state |= ios_base::badbit;
      this->__setstate_nothrow(__state);
@@ -1082,7 +1085,7 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::seekg(pos_type _
        throw;
      }
    }
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
    this->setstate(__state);
  }
  return *this;
@@ -1094,12 +1097,12 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::seekg(off_type _
  this->clear(__state);
  sentry __sen(*this, true);
  if (__sen) {
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    try {
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
      if (this->rdbuf()->pubseekoff(__off, __dir, ios_base::in) == pos_type(-1))
        __state |= ios_base::failbit;
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    } catch (...) {
      __state |= ios_base::badbit;
      this->__setstate_nothrow(__state);
@@ -1107,7 +1110,7 @@ basic_istream<_CharT, _Traits>& basic_istream<_CharT, _Traits>::seekg(off_type _
        throw;
      }
    }
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
    this->setstate(__state);
  }
  return *this;
@@ -1118,9 +1121,9 @@ _LIBCPP_HIDE_FROM_ABI basic_istream<_CharT, _Traits>& ws(basic_istream<_CharT, _
  ios_base::iostate __state = ios_base::goodbit;
  typename basic_istream<_CharT, _Traits>::sentry __sen(__is, true);
  if (__sen) {
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    try {
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
      const ctype<_CharT>& __ct = std::use_facet<ctype<_CharT> >(__is.getloc());
      while (true) {
        typename _Traits::int_type __i = __is.rdbuf()->sgetc();
@@ -1132,7 +1135,7 @@ _LIBCPP_HIDE_FROM_ABI basic_istream<_CharT, _Traits>& ws(basic_istream<_CharT, _
          break;
        __is.rdbuf()->sbumpc();
      }
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    } catch (...) {
      __state |= ios_base::badbit;
      __is.__setstate_nothrow(__state);
@@ -1140,7 +1143,7 @@ _LIBCPP_HIDE_FROM_ABI basic_istream<_CharT, _Traits>& ws(basic_istream<_CharT, _
        throw;
      }
    }
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
    __is.setstate(__state);
  }
  return __is;
@@ -1208,9 +1211,9 @@ operator>>(basic_istream<_CharT, _Traits>& __is, basic_string<_CharT, _Traits, _
  ios_base::iostate __state = ios_base::goodbit;
  typename basic_istream<_CharT, _Traits>::sentry __sen(__is);
  if (__sen) {
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    try {
-#endif
+# endif
      __str.clear();
      using _Size = typename basic_string<_CharT, _Traits, _Allocator>::size_type;
      streamsize const __width = __is.width();
@@ -1240,7 +1243,7 @@ operator>>(basic_istream<_CharT, _Traits>& __is, basic_string<_CharT, _Traits, _
      __is.width(0);
      if (__c == 0)
        __state |= ios_base::failbit;
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    } catch (...) {
      __state |= ios_base::badbit;
      __is.__setstate_nothrow(__state);
@@ -1248,7 +1251,7 @@ operator>>(basic_istream<_CharT, _Traits>& __is, basic_string<_CharT, _Traits, _
        throw;
      }
    }
-#endif
+# endif
    __is.setstate(__state);
  }
  return __is;
@@ -1260,9 +1263,9 @@ getline(basic_istream<_CharT, _Traits>& __is, basic_string<_CharT, _Traits, _All
  ios_base::iostate __state = ios_base::goodbit;
  typename basic_istream<_CharT, _Traits>::sentry __sen(__is, true);
  if (__sen) {
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    try {
-#endif
+# endif
      __str.clear();
      streamsize __extr = 0;
      while (true) {
@@ -1283,7 +1286,7 @@ getline(basic_istream<_CharT, _Traits>& __is, basic_string<_CharT, _Traits, _All
      }
      if (__extr == 0)
        __state |= ios_base::failbit;
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    } catch (...) {
      __state |= ios_base::badbit;
      __is.__setstate_nothrow(__state);
@@ -1291,7 +1294,7 @@ getline(basic_istream<_CharT, _Traits>& __is, basic_string<_CharT, _Traits, _All
        throw;
      }
    }
-#endif
+# endif
    __is.setstate(__state);
  }
  return __is;
@@ -1321,9 +1324,9 @@ operator>>(basic_istream<_CharT, _Traits>& __is, bitset<_Size>& __x) {
  ios_base::iostate __state = ios_base::goodbit;
  typename basic_istream<_CharT, _Traits>::sentry __sen(__is);
  if (__sen) {
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    try {
-#endif
+# endif
      basic_string<_CharT, _Traits> __str;
      const ctype<_CharT>& __ct = std::use_facet<ctype<_CharT> >(__is.getloc());
      size_t __c = 0;
@@ -1345,7 +1348,7 @@ operator>>(basic_istream<_CharT, _Traits>& __is, bitset<_Size>& __x) {
      __x = bitset<_Size>(__str);
      if (_Size > 0 && __c == 0)
        __state |= ios_base::failbit;
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    } catch (...) {
      __state |= ios_base::badbit;
      __is.__setstate_nothrow(__state);
@@ -1353,20 +1356,22 @@ operator>>(basic_istream<_CharT, _Traits>& __is, bitset<_Size>& __x) {
        throw;
      }
    }
-#endif
+# endif
    __is.setstate(__state);
  }
  return __is;
}

extern template class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_istream<char>;
-#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
extern template class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_istream<wchar_t>;
-#endif
+# endif

extern template class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_iostream<char>;

_LIBCPP_END_NAMESPACE_STD

+#endif // !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+
#if !defined(_LIBCPP_REMOVE_TRANSITIVE_INCLUDES) && _LIBCPP_STD_VER <= 20
#  include <concepts>
#  include <iosfwd>
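Every istream hunk above is the same mechanical re-indentation: the `#ifndef _LIBCPP_HAS_NO_EXCEPTIONS` guards gain one level of `#` nesting because the whole header body now sits inside the `#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)` block closed just above. The skeleton being re-indented is the usual sentry-plus-optional-try/catch extraction shape. A minimal standalone sketch of that shape against the public iostream API — `read_exact` and `MY_NO_EXCEPTIONS` are illustrative stand-ins, not libc++ names:

    #include <iostream>
    #include <istream>

    template <class CharT, class Traits>
    std::basic_istream<CharT, Traits>&
    read_exact(std::basic_istream<CharT, Traits>& is, CharT* s, std::streamsize n) {
      std::ios_base::iostate state = std::ios_base::goodbit;
      // The sentry flushes tied streams and checks the stream is usable.
      typename std::basic_istream<CharT, Traits>::sentry sen(is, /*noskipws=*/true);
      if (sen) {
    #ifndef MY_NO_EXCEPTIONS  // stand-in for the library's exceptions switch
        try {
    #endif
          if (is.rdbuf()->sgetn(s, n) != n)  // assumes rdbuf() is non-null, as for std::cin
            state |= std::ios_base::failbit | std::ios_base::eofbit;
    #ifndef MY_NO_EXCEPTIONS
        } catch (...) {
          state |= std::ios_base::badbit;  // a throwing streambuf marks the stream bad
        }
    #endif
      } else {
        state |= std::ios_base::failbit;  // sentry failed: nothing was extracted
      }
      is.setstate(state);  // may itself throw if exceptions() requests it
      return is;
    }

    int main() {
      char buf[4] = {};
      read_exact(std::cin, buf, 3);
      std::cout << (std::cin ? "ok\n" : "short read\n");
    }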
diff --git a/libcxx/include/list b/libcxx/include/list
index dc3b679..05234f7 100644
--- a/libcxx/include/list
+++ b/libcxx/include/list
@@ -272,19 +272,21 @@ struct __list_node_pointer_traits {
  typedef __rebind_pointer_t<_VoidPtr, __list_node<_Tp, _VoidPtr> > __node_pointer;
  typedef __rebind_pointer_t<_VoidPtr, __list_node_base<_Tp, _VoidPtr> > __base_pointer;

-#if defined(_LIBCPP_ABI_LIST_REMOVE_NODE_POINTER_UB)
-  typedef __base_pointer __link_pointer;
-#else
-  typedef __conditional_t<is_pointer<_VoidPtr>::value, __base_pointer, __node_pointer> __link_pointer;
+// TODO(LLVM 22): Remove this check
+#ifndef _LIBCPP_ABI_LIST_REMOVE_NODE_POINTER_UB
+  static_assert(sizeof(__node_pointer) == sizeof(__base_pointer) &&
+                    _LIBCPP_ALIGNOF(__base_pointer) == _LIBCPP_ALIGNOF(__node_pointer),
+                "It looks like you are using std::list with a fancy pointer type that has a different representation "
+                "depending on whether it points to a list base pointer or a list node pointer (both of which are "
+                "implementation details of the standard library). This means that your ABI is being broken between "
+                "LLVM 19 and LLVM 20. If you don't care about your ABI being broken, define the "
+                "_LIBCPP_ABI_LIST_REMOVE_NODE_POINTER_UB macro to silence this diagnostic.");
#endif

-  typedef __conditional_t<is_same<__link_pointer, __node_pointer>::value, __base_pointer, __node_pointer>
-      __non_link_pointer;
-
-  static _LIBCPP_HIDE_FROM_ABI __link_pointer __unsafe_link_pointer_cast(__link_pointer __p) { return __p; }
+  static _LIBCPP_HIDE_FROM_ABI __base_pointer __unsafe_link_pointer_cast(__base_pointer __p) { return __p; }

-  static _LIBCPP_HIDE_FROM_ABI __link_pointer __unsafe_link_pointer_cast(__non_link_pointer __p) {
-    return static_cast<__link_pointer>(static_cast<_VoidPtr>(__p));
+  static _LIBCPP_HIDE_FROM_ABI __base_pointer __unsafe_link_pointer_cast(__node_pointer __p) {
+    return static_cast<__base_pointer>(static_cast<_VoidPtr>(__p));
  }
};
@@ -293,16 +295,13 @@ struct __list_node_base {
  typedef __list_node_pointer_traits<_Tp, _VoidPtr> _NodeTraits;
  typedef typename _NodeTraits::__node_pointer __node_pointer;
  typedef typename _NodeTraits::__base_pointer __base_pointer;
-  typedef typename _NodeTraits::__link_pointer __link_pointer;

-  __link_pointer __prev_;
-  __link_pointer __next_;
+  __base_pointer __prev_;
+  __base_pointer __next_;

-  _LIBCPP_HIDE_FROM_ABI __list_node_base()
-      : __prev_(_NodeTraits::__unsafe_link_pointer_cast(__self())),
-        __next_(_NodeTraits::__unsafe_link_pointer_cast(__self())) {}
+  _LIBCPP_HIDE_FROM_ABI __list_node_base() : __prev_(__self()), __next_(__self()) {}

-  _LIBCPP_HIDE_FROM_ABI explicit __list_node_base(__link_pointer __prev, __link_pointer __next)
+  _LIBCPP_HIDE_FROM_ABI explicit __list_node_base(__base_pointer __prev, __base_pointer __next)
      : __prev_(__prev), __next_(__next) {}

  _LIBCPP_HIDE_FROM_ABI __base_pointer __self() { return pointer_traits<__base_pointer>::pointer_to(*this); }
@@ -333,12 +332,12 @@ public:
#endif

  typedef __list_node_base<_Tp, _VoidPtr> __base;
-  typedef typename __base::__link_pointer __link_pointer;
+  typedef typename __base::__base_pointer __base_pointer;

-  _LIBCPP_HIDE_FROM_ABI explicit __list_node(__link_pointer __prev, __link_pointer __next) : __base(__prev, __next) {}
+  _LIBCPP_HIDE_FROM_ABI explicit __list_node(__base_pointer __prev, __base_pointer __next) : __base(__prev, __next) {}
  _LIBCPP_HIDE_FROM_ABI ~__list_node() {}

-  _LIBCPP_HIDE_FROM_ABI __link_pointer __as_link() { return static_cast<__link_pointer>(__base::__self()); }
+  _LIBCPP_HIDE_FROM_ABI __base_pointer __as_link() { return __base::__self(); }
};

template <class _Tp, class _Alloc = allocator<_Tp> >
@@ -351,11 +350,11 @@ class _LIBCPP_TEMPLATE_VIS __list_const_iterator;
template <class _Tp, class _VoidPtr>
class _LIBCPP_TEMPLATE_VIS __list_iterator {
  typedef __list_node_pointer_traits<_Tp, _VoidPtr> _NodeTraits;
-  typedef typename _NodeTraits::__link_pointer __link_pointer;
+  typedef typename _NodeTraits::__base_pointer __base_pointer;

-  __link_pointer __ptr_;
+  __base_pointer __ptr_;

-  _LIBCPP_HIDE_FROM_ABI explicit __list_iterator(__link_pointer __p) _NOEXCEPT : __ptr_(__p) {}
+  _LIBCPP_HIDE_FROM_ABI explicit __list_iterator(__base_pointer __p) _NOEXCEPT : __ptr_(__p) {}

  template <class, class>
  friend class list;
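The new static_assert replaces the old `__link_pointer` machinery: instead of choosing between `__base_pointer` and `__node_pointer` at compile time, the code now always traffics in `__base_pointer` and merely verifies that a user's fancy pointer does not change representation between the two rebound pointer types. A hedged, self-contained illustration of that kind of check (toy `Base`/`Node` types, not libc++'s):

    #include <memory>

    struct Base {};
    struct Node : Base { int value; };

    template <class VoidPtr>
    struct pointer_check {
      // Rebind the "void pointer" to the two pointee types, as an allocator-aware
      // container would, and require identical object representation.
      using base_ptr = typename std::pointer_traits<VoidPtr>::template rebind<Base>;
      using node_ptr = typename std::pointer_traits<VoidPtr>::template rebind<Node>;
      static_assert(sizeof(base_ptr) == sizeof(node_ptr) && alignof(base_ptr) == alignof(node_ptr),
                    "fancy pointer changes representation between base and node pointers");
    };

    template struct pointer_check<void*>;  // raw pointers: trivially passes

    int main() {}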
@@ -409,11 +408,11 @@ public:
template <class _Tp, class _VoidPtr>
class _LIBCPP_TEMPLATE_VIS __list_const_iterator {
  typedef __list_node_pointer_traits<_Tp, _VoidPtr> _NodeTraits;
-  typedef typename _NodeTraits::__link_pointer __link_pointer;
+  typedef typename _NodeTraits::__base_pointer __base_pointer;

-  __link_pointer __ptr_;
+  __base_pointer __ptr_;

-  _LIBCPP_HIDE_FROM_ABI explicit __list_const_iterator(__link_pointer __p) _NOEXCEPT : __ptr_(__p) {}
+  _LIBCPP_HIDE_FROM_ABI explicit __list_const_iterator(__base_pointer __p) _NOEXCEPT : __ptr_(__p) {}

  template <class, class>
  friend class list;
@@ -486,8 +485,8 @@ protected:
  typedef typename __node_alloc_traits::pointer __node_pointer;
  typedef typename __node_alloc_traits::pointer __node_const_pointer;
  typedef __list_node_pointer_traits<value_type, __void_pointer> __node_pointer_traits;
-  typedef typename __node_pointer_traits::__link_pointer __link_pointer;
-  typedef __link_pointer __link_const_pointer;
+  typedef typename __node_pointer_traits::__base_pointer __base_pointer;
+  typedef __base_pointer __link_const_pointer;
  typedef typename __alloc_traits::pointer pointer;
  typedef typename __alloc_traits::const_pointer const_pointer;
  typedef typename __alloc_traits::difference_type difference_type;
@@ -498,21 +497,21 @@ protected:
                "internal allocator type must differ from user-specified type; otherwise overload resolution breaks");

  __node_base __end_;
-  __compressed_pair<size_type, __node_allocator> __size_alloc_;
+  _LIBCPP_COMPRESSED_PAIR(size_type, __size_, __node_allocator, __node_alloc_);

-  _LIBCPP_HIDE_FROM_ABI __link_pointer __end_as_link() const _NOEXCEPT {
+  _LIBCPP_HIDE_FROM_ABI __base_pointer __end_as_link() const _NOEXCEPT {
    return __node_pointer_traits::__unsafe_link_pointer_cast(const_cast<__node_base&>(__end_).__self());
  }

-  _LIBCPP_HIDE_FROM_ABI size_type& __sz() _NOEXCEPT { return __size_alloc_.first(); }
-  _LIBCPP_HIDE_FROM_ABI const size_type& __sz() const _NOEXCEPT { return __size_alloc_.first(); }
-  _LIBCPP_HIDE_FROM_ABI __node_allocator& __node_alloc() _NOEXCEPT { return __size_alloc_.second(); }
-  _LIBCPP_HIDE_FROM_ABI const __node_allocator& __node_alloc() const _NOEXCEPT { return __size_alloc_.second(); }
+  _LIBCPP_HIDE_FROM_ABI size_type& __sz() _NOEXCEPT { return __size_; }
+  _LIBCPP_HIDE_FROM_ABI const size_type& __sz() const _NOEXCEPT { return __size_; }
+  _LIBCPP_HIDE_FROM_ABI __node_allocator& __node_alloc() _NOEXCEPT { return __node_alloc_; }
+  _LIBCPP_HIDE_FROM_ABI const __node_allocator& __node_alloc() const _NOEXCEPT { return __node_alloc_; }

  _LIBCPP_HIDE_FROM_ABI size_type __node_alloc_max_size() const _NOEXCEPT {
    return __node_alloc_traits::max_size(__node_alloc());
  }

-  _LIBCPP_HIDE_FROM_ABI static void __unlink_nodes(__link_pointer __f, __link_pointer __l) _NOEXCEPT;
+  _LIBCPP_HIDE_FROM_ABI static void __unlink_nodes(__base_pointer __f, __base_pointer __l) _NOEXCEPT;

  _LIBCPP_HIDE_FROM_ABI __list_imp() _NOEXCEPT_(is_nothrow_default_constructible<__node_allocator>::value);
  _LIBCPP_HIDE_FROM_ABI __list_imp(const allocator_type& __a);
@@ -549,7 +548,7 @@ protected:
  }

  template <class... _Args>
-  _LIBCPP_HIDE_FROM_ABI __node_pointer __create_node(__link_pointer __prev, __link_pointer __next, _Args&&... __args) {
+  _LIBCPP_HIDE_FROM_ABI __node_pointer __create_node(__base_pointer __prev, __base_pointer __next, _Args&&... __args) {
    __node_allocator& __alloc = __node_alloc();
    __allocation_guard<__node_allocator> __guard(__alloc, 1);
    // Begin the lifetime of the node itself. Note that this doesn't begin the lifetime of the value
@@ -594,24 +593,27 @@ private:

// Unlink nodes [__f, __l]
template <class _Tp, class _Alloc>
-inline void __list_imp<_Tp, _Alloc>::__unlink_nodes(__link_pointer __f, __link_pointer __l) _NOEXCEPT {
+inline void __list_imp<_Tp, _Alloc>::__unlink_nodes(__base_pointer __f, __base_pointer __l) _NOEXCEPT {
  __f->__prev_->__next_ = __l->__next_;
  __l->__next_->__prev_ = __f->__prev_;
}

template <class _Tp, class _Alloc>
inline __list_imp<_Tp, _Alloc>::__list_imp() _NOEXCEPT_(is_nothrow_default_constructible<__node_allocator>::value)
-    : __size_alloc_(0, __default_init_tag()) {}
+    : __size_(0) {}

template <class _Tp, class _Alloc>
-inline __list_imp<_Tp, _Alloc>::__list_imp(const allocator_type& __a) : __size_alloc_(0, __node_allocator(__a)) {}
+inline __list_imp<_Tp, _Alloc>::__list_imp(const allocator_type& __a)
+    : __size_(0), __node_alloc_(__node_allocator(__a)) {}

template <class _Tp, class _Alloc>
-inline __list_imp<_Tp, _Alloc>::__list_imp(const __node_allocator& __a) : __size_alloc_(0, __a) {}
+inline __list_imp<_Tp, _Alloc>::__list_imp(const __node_allocator& __a) : __size_(0), __node_alloc_(__a) {}

#ifndef _LIBCPP_CXX03_LANG
template <class _Tp, class _Alloc>
-inline __list_imp<_Tp, _Alloc>::__list_imp(__node_allocator&& __a) _NOEXCEPT : __size_alloc_(0, std::move(__a)) {}
+inline __list_imp<_Tp, _Alloc>::__list_imp(__node_allocator&& __a) _NOEXCEPT
+    : __size_(0),
+      __node_alloc_(std::move(__a)) {}
#endif

template <class _Tp, class _Alloc>
@@ -622,8 +624,8 @@ __list_imp<_Tp, _Alloc>::~__list_imp() {
template <class _Tp, class _Alloc>
void __list_imp<_Tp, _Alloc>::clear() _NOEXCEPT {
  if (!empty()) {
-    __link_pointer __f = __end_.__next_;
-    __link_pointer __l = __end_as_link();
+    __base_pointer __f = __end_.__next_;
+    __base_pointer __l = __end_as_link();
    __unlink_nodes(__f, __l->__prev_);
    __sz() = 0;
    while (__f != __l) {
@@ -669,7 +671,7 @@ class _LIBCPP_TEMPLATE_VIS list : private __list_imp<_Tp, _Alloc> {
  typedef typename base::__node_alloc_traits __node_alloc_traits;
  typedef typename base::__node_base __node_base;
  typedef typename base::__node_base_pointer __node_base_pointer;
-  typedef typename base::__link_pointer __link_pointer;
+  typedef typename base::__base_pointer __base_pointer;

public:
  typedef _Tp value_type;
@@ -918,9 +920,9 @@ private:
  template <class _Iterator, class _Sentinel>
  _LIBCPP_HIDE_FROM_ABI iterator __insert_with_sentinel(const_iterator __p, _Iterator __f, _Sentinel __l);

-  _LIBCPP_HIDE_FROM_ABI static void __link_nodes(__link_pointer __p, __link_pointer __f, __link_pointer __l);
-  _LIBCPP_HIDE_FROM_ABI void __link_nodes_at_front(__link_pointer __f, __link_pointer __l);
-  _LIBCPP_HIDE_FROM_ABI void __link_nodes_at_back(__link_pointer __f, __link_pointer __l);
+  _LIBCPP_HIDE_FROM_ABI static void __link_nodes(__base_pointer __p, __base_pointer __f, __base_pointer __l);
+  _LIBCPP_HIDE_FROM_ABI void __link_nodes_at_front(__base_pointer __f, __base_pointer __l);
+  _LIBCPP_HIDE_FROM_ABI void __link_nodes_at_back(__base_pointer __f, __base_pointer __l);
  _LIBCPP_HIDE_FROM_ABI iterator __iterator(size_type __n);
  // TODO: Make this _LIBCPP_HIDE_FROM_ABI
  template <class _Comp>
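`__unlink_nodes` and `__link_nodes` (declared above, defined below) are the splice primitives of a circular doubly-linked list with a sentinel node; the `__link_pointer` to `__base_pointer` rename changes only the pointer type they traffic in, not the pointer surgery itself. A standalone sketch of the same two operations on toy types:

    #include <cassert>

    struct NodeBase {
      NodeBase* prev;
      NodeBase* next;
      NodeBase() : prev(this), next(this) {}  // empty list: sentinel points at itself
    };

    // Detach the closed range [f, l] from whatever list currently holds it.
    void unlink_nodes(NodeBase* f, NodeBase* l) {
      f->prev->next = l->next;
      l->next->prev = f->prev;
    }

    // Re-attach [f, l] immediately before p.
    void link_nodes(NodeBase* p, NodeBase* f, NodeBase* l) {
      p->prev->next = f;
      f->prev       = p->prev;
      p->prev       = l;
      l->next       = p;
    }

    int main() {
      NodeBase sentinel, a;
      link_nodes(&sentinel, &a, &a);  // insert a single node before the sentinel
      assert(sentinel.next == &a && a.next == &sentinel);
      unlink_nodes(&a, &a);
      assert(sentinel.next == &sentinel);  // list is empty again
    }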
@@ -954,7 +956,7 @@ list(from_range_t, _Range&&, _Alloc = _Alloc()) -> list<ranges::range_value_t<_R

// Link in nodes [__f, __l] just prior to __p
template <class _Tp, class _Alloc>
-inline void list<_Tp, _Alloc>::__link_nodes(__link_pointer __p, __link_pointer __f, __link_pointer __l) {
+inline void list<_Tp, _Alloc>::__link_nodes(__base_pointer __p, __base_pointer __f, __base_pointer __l) {
  __p->__prev_->__next_ = __f;
  __f->__prev_ = __p->__prev_;
  __p->__prev_ = __l;
@@ -963,7 +965,7 @@ inline void list<_Tp, _Alloc>::__link_nodes(__link_pointer __p, __link_pointer _

// Link in nodes [__f, __l] at the front of the list
template <class _Tp, class _Alloc>
-inline void list<_Tp, _Alloc>::__link_nodes_at_front(__link_pointer __f, __link_pointer __l) {
+inline void list<_Tp, _Alloc>::__link_nodes_at_front(__base_pointer __f, __base_pointer __l) {
  __f->__prev_ = base::__end_as_link();
  __l->__next_ = base::__end_.__next_;
  __l->__next_->__prev_ = __l;
@@ -972,7 +974,7 @@ inline void list<_Tp, _Alloc>::__link_nodes_at_front(__link_pointer __f, __link_

// Link in nodes [__f, __l] at the back of the list
template <class _Tp, class _Alloc>
-inline void list<_Tp, _Alloc>::__link_nodes_at_back(__link_pointer __f, __link_pointer __l) {
+inline void list<_Tp, _Alloc>::__link_nodes_at_back(__base_pointer __f, __base_pointer __l) {
  __l->__next_ = base::__end_as_link();
  __f->__prev_ = base::__end_.__prev_;
  __f->__prev_->__next_ = __f;
@@ -1164,7 +1166,7 @@ list<_Tp, _Alloc>::insert(const_iterator __p, size_type __n, const value_type& _
#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
  } catch (...) {
    while (true) {
-      __link_pointer __prev = __e.__ptr_->__prev_;
+      __base_pointer __prev = __e.__ptr_->__prev_;
      __node_pointer __current = __e.__ptr_->__as_node();
      this->__delete_node(__current);
      if (__prev == 0)
@@ -1206,7 +1208,7 @@ list<_Tp, _Alloc>::__insert_with_sentinel(const_iterator __p, _Iterator __f, _Se
#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
  } catch (...) {
    while (true) {
-      __link_pointer __prev = __e.__ptr_->__prev_;
+      __base_pointer __prev = __e.__ptr_->__prev_;
      __node_pointer __current = __e.__ptr_->__as_node();
      this->__delete_node(__current);
      if (__prev == 0)
@@ -1225,7 +1227,7 @@ list<_Tp, _Alloc>::__insert_with_sentinel(const_iterator __p, _Iterator __f, _Se
template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::push_front(const value_type& __x) {
  __node_pointer __node = this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, __x);
-  __link_pointer __nl = __node->__as_link();
+  __base_pointer __nl = __node->__as_link();
  __link_nodes_at_front(__nl, __nl);
  ++base::__sz();
}
@@ -1233,7 +1235,7 @@ void list<_Tp, _Alloc>::push_front(const value_type& __x) {
template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::push_back(const value_type& __x) {
  __node_pointer __node = this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, __x);
-  __link_pointer __nl = __node->__as_link();
+  __base_pointer __nl = __node->__as_link();
  __link_nodes_at_back(__nl, __nl);
  ++base::__sz();
}
@@ -1243,7 +1245,7 @@ void list<_Tp, _Alloc>::push_back(const value_type& __x) {
template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::push_front(value_type&& __x) {
  __node_pointer __node = this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, std::move(__x));
-  __link_pointer __nl = __node->__as_link();
+  __base_pointer __nl = __node->__as_link();
  __link_nodes_at_front(__nl, __nl);
  ++base::__sz();
}
@@ -1251,7 +1253,7 @@ void list<_Tp, _Alloc>::push_front(value_type&& __x) {
template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::push_back(value_type&& __x) {
  __node_pointer __node = this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, std::move(__x));
-  __link_pointer __nl = __node->__as_link();
+  __base_pointer __nl = __node->__as_link();
  __link_nodes_at_back(__nl, __nl);
  ++base::__sz();
}
@@ -1266,7 +1268,7 @@ void list<_Tp, _Alloc>::emplace_front(_Args&&... __args) {
  __node_pointer __node =
      this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, std::forward<_Args>(__args)...);
-  __link_pointer __nl = __node->__as_link();
+  __base_pointer __nl = __node->__as_link();
  __link_nodes_at_front(__nl, __nl);
  ++base::__sz();
# if _LIBCPP_STD_VER >= 17
@@ -1284,7 +1286,7 @@ void list<_Tp, _Alloc>::emplace_back(_Args&&... __args) {
  __node_pointer __node =
      this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, std::forward<_Args>(__args)...);
-  __link_pointer __nl = __node->__as_link();
+  __base_pointer __nl = __node->__as_link();
  __link_nodes_at_back(__nl, __nl);
  ++base::__sz();
# if _LIBCPP_STD_VER >= 17
@@ -1297,7 +1299,7 @@ template <class... _Args>
typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::emplace(const_iterator __p, _Args&&... __args) {
  __node_pointer __node =
      this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, std::forward<_Args>(__args)...);
-  __link_pointer __nl = __node->__as_link();
+  __base_pointer __nl = __node->__as_link();
  __link_nodes(__p.__ptr_, __nl, __nl);
  ++base::__sz();
  return iterator(__nl);
@@ -1306,7 +1308,7 @@ typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::emplace(const_iterator _
template <class _Tp, class _Alloc>
typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::insert(const_iterator __p, value_type&& __x) {
  __node_pointer __node = this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, std::move(__x));
-  __link_pointer __nl = __node->__as_link();
+  __base_pointer __nl = __node->__as_link();
  __link_nodes(__p.__ptr_, __nl, __nl);
  ++base::__sz();
  return iterator(__nl);
@@ -1317,7 +1319,7 @@ typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::insert(const_iterator __
template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::pop_front() {
  _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(!empty(), "list::pop_front() called with empty list");
-  __link_pointer __n = base::__end_.__next_;
+  __base_pointer __n = base::__end_.__next_;
  base::__unlink_nodes(__n, __n);
  --base::__sz();
  this->__delete_node(__n->__as_node());
@@ -1326,7 +1328,7 @@ void list<_Tp, _Alloc>::pop_front() {
template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::pop_back() {
  _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(!empty(), "list::pop_back() called on an empty list");
-  __link_pointer __n = base::__end_.__prev_;
+  __base_pointer __n = base::__end_.__prev_;
  base::__unlink_nodes(__n, __n);
  --base::__sz();
  this->__delete_node(__n->__as_node());
@@ -1335,8 +1337,8 @@ void list<_Tp, _Alloc>::pop_back() {
template <class _Tp, class _Alloc>
typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::erase(const_iterator __p) {
  _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(__p != end(), "list::erase(iterator) called with a non-dereferenceable iterator");
-  __link_pointer __n = __p.__ptr_;
-  __link_pointer __r = __n->__next_;
+  __base_pointer __n = __p.__ptr_;
+  __base_pointer __r = __n->__next_;
  base::__unlink_nodes(__n, __n);
  --base::__sz();
  this->__delete_node(__n->__as_node());
@@ -1348,7 +1350,7 @@ typename list<_Tp, _Alloc>::iterator list<_Tp, _Alloc>::erase(const_iterator __f
  if (__f != __l) {
    base::__unlink_nodes(__f.__ptr_, __l.__ptr_->__prev_);
    while (__f != __l) {
-      __link_pointer __n = __f.__ptr_;
+      __base_pointer __n = __f.__ptr_;
      ++__f;
      --base::__sz();
      this->__delete_node(__n->__as_node());
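Note the ordering in the push/emplace functions above: the node is created fully unlinked, and only after construction succeeds is it linked in and the size bumped. That ordering is what gives these insertions the strong exception guarantee, which is observable through the public API:

    #include <cassert>
    #include <list>
    #include <stdexcept>

    struct Explosive {
      Explosive() { throw std::runtime_error("boom"); }
    };

    int main() {
      std::list<Explosive> l;
      try {
        l.emplace_back();  // the constructor throws while the node is still unlinked
      } catch (const std::runtime_error&) {}
      assert(l.empty());   // the failed insertion left no trace
    }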
@@ -1377,7 +1379,7 @@ void list<_Tp, _Alloc>::resize(size_type __n) {
#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
  } catch (...) {
    while (true) {
-      __link_pointer __prev = __e.__ptr_->__prev_;
+      __base_pointer __prev = __e.__ptr_->__prev_;
      __node_pointer __current = __e.__ptr_->__as_node();
      this->__delete_node(__current);
      if (__prev == 0)
@@ -1401,7 +1403,7 @@ void list<_Tp, _Alloc>::resize(size_type __n, const value_type& __x) {
    size_type __ds = 0;
    __node_pointer __node = this->__create_node(/* prev = */ nullptr, /* next = */ nullptr, __x);
    ++__ds;
-    __link_pointer __nl = __node->__as_link();
+    __base_pointer __nl = __node->__as_link();
    iterator __r = iterator(__nl);
    iterator __e = __r;
#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
@@ -1413,7 +1415,7 @@ void list<_Tp, _Alloc>::resize(size_type __n, const value_type& __x) {
#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    } catch (...) {
      while (true) {
-        __link_pointer __prev = __e.__ptr_->__prev_;
+        __base_pointer __prev = __e.__ptr_->__prev_;
        __node_pointer __current = __e.__ptr_->__as_node();
        this->__delete_node(__current);
        if (__prev == 0)
@@ -1433,8 +1435,8 @@ void list<_Tp, _Alloc>::splice(const_iterator __p, list& __c) {
  _LIBCPP_ASSERT_VALID_INPUT_RANGE(
      this != std::addressof(__c), "list::splice(iterator, list) called with this == &list");
  if (!__c.empty()) {
-    __link_pointer __f = __c.__end_.__next_;
-    __link_pointer __l = __c.__end_.__prev_;
+    __base_pointer __f = __c.__end_.__next_;
+    __base_pointer __l = __c.__end_.__prev_;
    base::__unlink_nodes(__f, __l);
    __link_nodes(__p.__ptr_, __f, __l);
    base::__sz() += __c.__sz();
@@ -1445,7 +1447,7 @@ void list<_Tp, _Alloc>::splice(const_iterator __p, list& __c) {
template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::splice(const_iterator __p, list& __c, const_iterator __i) {
  if (__p.__ptr_ != __i.__ptr_ && __p.__ptr_ != __i.__ptr_->__next_) {
-    __link_pointer __f = __i.__ptr_;
+    __base_pointer __f = __i.__ptr_;
    base::__unlink_nodes(__f, __f);
    __link_nodes(__p.__ptr_, __f, __f);
    --__c.__sz();
@@ -1456,9 +1458,9 @@ void list<_Tp, _Alloc>::splice(const_iterator __p, list& __c, const_iterator __i
template <class _Tp, class _Alloc>
void list<_Tp, _Alloc>::splice(const_iterator __p, list& __c, const_iterator __f, const_iterator __l) {
  if (__f != __l) {
-    __link_pointer __first = __f.__ptr_;
+    __base_pointer __first = __f.__ptr_;
    --__l;
-    __link_pointer __last = __l.__ptr_;
+    __base_pointer __last = __l.__ptr_;
    if (this != std::addressof(__c)) {
      size_type __s = std::distance(__f, __l) + 1;
      __c.__sz() -= __s;
@@ -1546,8 +1548,8 @@ void list<_Tp, _Alloc>::merge(list& __c, _Comp __comp) {
        ;
      base::__sz() += __ds;
      __c.__sz() -= __ds;
-      __link_pointer __f = __f2.__ptr_;
-      __link_pointer __l = __m2.__ptr_->__prev_;
+      __base_pointer __f = __f2.__ptr_;
+      __base_pointer __l = __m2.__ptr_->__prev_;
      __f2 = __m2;
      base::__unlink_nodes(__f, __l);
      __m2 = std::next(__f1);
@@ -1581,7 +1583,7 @@ list<_Tp, _Alloc>::__sort(iterator __f1, iterator __e2, size_type __n, _Comp& __
    return __f1;
  case 2:
    if (__comp(*--__e2, *__f1)) {
-      __link_pointer __f = __e2.__ptr_;
+      __base_pointer __f = __e2.__ptr_;
      base::__unlink_nodes(__f, __f);
      __link_nodes(__f1.__ptr_, __f, __f);
      return __e2;
@@ -1596,8 +1598,8 @@ list<_Tp, _Alloc>::__sort(iterator __f1, iterator __e2, size_type __n, _Comp& __
    iterator __m2 = std::next(__f2);
    for (; __m2 != __e2 && __comp(*__m2, *__f1); ++__m2)
      ;
-    __link_pointer __f = __f2.__ptr_;
-    __link_pointer __l = __m2.__ptr_->__prev_;
+    __base_pointer __f = __f2.__ptr_;
+    __base_pointer __l = __m2.__ptr_->__prev_;
    __r = __f2;
    __e1 = __f2 = __m2;
    base::__unlink_nodes(__f, __l);
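All of the splice/merge/sort plumbing above reduces to relinking base pointers, which is why std::list::splice transfers nodes in O(1) without copying and leaves iterators into the moved elements valid; they simply refer into the destination list afterwards:

    #include <cassert>
    #include <list>

    int main() {
      std::list<int> a{1, 2, 3};
      std::list<int> b{10, 20};
      auto it = b.begin();   // points at 10
      a.splice(a.end(), b);  // relinks both nodes, no element copies
      assert(b.empty() && a.size() == 5);
      assert(*it == 10);     // it now refers into a, still valid
    }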
@@ -1611,8 +1613,8 @@ list<_Tp, _Alloc>::__sort(iterator __f1, iterator __e2, size_type __n, _Comp& __
    iterator __m2 = std::next(__f2);
    for (; __m2 != __e2 && __comp(*__m2, *__f1); ++__m2)
      ;
-    __link_pointer __f = __f2.__ptr_;
-    __link_pointer __l = __m2.__ptr_->__prev_;
+    __base_pointer __f = __f2.__ptr_;
+    __base_pointer __l = __m2.__ptr_->__prev_;
    if (__e1 == __f2)
      __e1 = __m2;
    __f2 = __m2;
diff --git a/libcxx/include/memory b/libcxx/include/memory
index b940a32..db3386c 100644
--- a/libcxx/include/memory
+++ b/libcxx/include/memory
@@ -182,8 +182,8 @@ public:
    raw_storage_iterator operator++(int);
};

-template <class T> pair<T*,ptrdiff_t> get_temporary_buffer(ptrdiff_t n) noexcept;
-template <class T> void return_temporary_buffer(T* p) noexcept;
+template <class T> pair<T*,ptrdiff_t> get_temporary_buffer(ptrdiff_t n) noexcept; // deprecated in C++17, removed in C++20
+template <class T> void return_temporary_buffer(T* p) noexcept; // deprecated in C++17, removed in C++20

template <class T> T* addressof(T& r) noexcept;
template <class T> T* addressof(const T&& r) noexcept = delete;
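The new comments mirror the standard's status of these functions: get_temporary_buffer/return_temporary_buffer were deprecated in C++17 and removed in C++20 (libc++'s internal replacement is the `__memory/unique_temporary_buffer.h` helper registered in the module map below). A hedged sketch of the usual user-side replacement — plain allocator calls owned by a unique_ptr; `make_temporary` is a hypothetical name:

    #include <memory>

    template <class T>
    auto make_temporary(std::ptrdiff_t n) {
      std::allocator<T> alloc;
      T* p = alloc.allocate(n);  // raw, uninitialized storage, like get_temporary_buffer
      auto del = [n](T* q) { std::allocator<T>().deallocate(q, n); };
      return std::unique_ptr<T[], decltype(del)>(p, del);
    }

    int main() {
      auto buf = make_temporary<int>(64);  // deallocated automatically
      buf[0] = 42;  // fine for trivial types; non-trivial types need explicit construction
    }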
diff --git a/libcxx/include/module.modulemap b/libcxx/include/module.modulemap
index cc41912..c1181a3 100644
--- a/libcxx/include/module.modulemap
+++ b/libcxx/include/module.modulemap
@@ -1542,6 +1542,11 @@ module std_private_memory_unique_ptr [system] {
  export std_private_type_traits_is_pointer
  export std_private_type_traits_type_identity
}
+module std_private_memory_unique_temporary_buffer [system] {
+  header "__memory/unique_temporary_buffer.h"
+  export std_private_memory_unique_ptr
+  export std_private_type_traits_is_constant_evaluated
+}
module std_private_memory_uses_allocator [system] { header "__memory/uses_allocator.h" }
module std_private_memory_uses_allocator_construction [system] { header "__memory/uses_allocator_construction.h" }
module std_private_memory_voidify [system] { header "__memory/voidify.h" }
diff --git a/libcxx/include/ostream b/libcxx/include/ostream
index 359d3c0..8374288 100644
--- a/libcxx/include/ostream
+++ b/libcxx/include/ostream
@@ -174,17 +174,21 @@ void vprint_nonunicode(ostream& os, string_view fmt, format_args args);

#include <__config>

-#include <__ostream/basic_ostream.h>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)

-#if _LIBCPP_STD_VER >= 23
-#  include <__ostream/print.h>
-#endif
+# include <__ostream/basic_ostream.h>

-#include <version>
+# if _LIBCPP_STD_VER >= 23
+#  include <__ostream/print.h>
+# endif

-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-#  pragma GCC system_header
-#endif
+# include <version>
+
+# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#  pragma GCC system_header
+# endif
+
+#endif // !defined(_LIBCPP_HAS_NO_LOCALIZATION)

#if !defined(_LIBCPP_REMOVE_TRANSITIVE_INCLUDES) && _LIBCPP_STD_VER <= 20
#  include <atomic>
diff --git a/libcxx/include/sstream b/libcxx/include/sstream
index 78a7f2d..553cc08 100644
--- a/libcxx/include/sstream
+++ b/libcxx/include/sstream
@@ -313,21 +313,24 @@ typedef basic_stringstream<wchar_t> wstringstream;
// clang-format on

#include <__config>
-#include <__fwd/sstream.h>
-#include <__ostream/basic_ostream.h>
-#include <__type_traits/is_convertible.h>
-#include <__utility/swap.h>
-#include <istream>
-#include <string>
-#include <string_view>
-#include <version>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-#  pragma GCC system_header
-#endif
+
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+
+# include <__fwd/sstream.h>
+# include <__ostream/basic_ostream.h>
+# include <__type_traits/is_convertible.h>
+# include <__utility/swap.h>
+# include <istream>
+# include <string>
+# include <string_view>
+# include <version>
+
+# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#  pragma GCC system_header
+# endif

_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
+# include <__undef_macros>

_LIBCPP_BEGIN_NAMESPACE_STD

@@ -370,7 +373,7 @@ public:
    str(__s);
  }

-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
  _LIBCPP_HIDE_FROM_ABI explicit basic_stringbuf(const allocator_type& __a)
      : basic_stringbuf(ios_base::in | ios_base::out, __a) {}

@@ -404,9 +407,9 @@ public:
      : __str_(__s), __hm_(nullptr), __mode_(__wch) {
    __init_buf_ptrs();
  }
-#endif // _LIBCPP_STD_VER >= 20
+# endif // _LIBCPP_STD_VER >= 20

-#if _LIBCPP_STD_VER >= 26
+# if _LIBCPP_STD_VER >= 26

  template <class _Tp>
    requires is_convertible_v<const _Tp&, basic_string_view<_CharT, _Traits>>
@@ -428,37 +431,37 @@ public:
    __init_buf_ptrs();
  }

-#endif // _LIBCPP_STD_VER >= 26
+# endif // _LIBCPP_STD_VER >= 26

  basic_stringbuf(const basic_stringbuf&) = delete;
  basic_stringbuf(basic_stringbuf&& __rhs) : __mode_(__rhs.__mode_) { __move_init(std::move(__rhs)); }

-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
  _LIBCPP_HIDE_FROM_ABI basic_stringbuf(basic_stringbuf&& __rhs, const allocator_type& __a)
      : basic_stringbuf(__rhs.__mode_, __a) {
    __move_init(std::move(__rhs));
  }
-#endif
+# endif

  // [stringbuf.assign] Assign and swap:
  basic_stringbuf& operator=(const basic_stringbuf&) = delete;
  basic_stringbuf& operator=(basic_stringbuf&& __rhs);
  void swap(basic_stringbuf& __rhs)
-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
      noexcept(allocator_traits<allocator_type>::propagate_on_container_swap::value ||
               allocator_traits<allocator_type>::is_always_equal::value)
-#endif
+# endif
      ;

  // [stringbuf.members] Member functions:
-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
  _LIBCPP_HIDE_FROM_ABI allocator_type get_allocator() const noexcept { return __str_.get_allocator(); }
-#endif
+# endif

-#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
+# if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
  string_type str() const;
-#else
+# else
  _LIBCPP_HIDE_FROM_ABI string_type str() const& { return str(__str_.get_allocator()); }

  _LIBCPP_HIDE_FROM_ABI string_type str() && {
@@ -472,9 +475,9 @@ public:
    __init_buf_ptrs();
    return __result;
  }
-#endif // _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
+# endif // _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)

-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
  template <class _SAlloc>
    requires __is_allocator<_SAlloc>::value
  _LIBCPP_HIDE_FROM_ABI basic_string<char_type, traits_type, _SAlloc> str(const _SAlloc& __sa) const {
@@ -482,14 +485,14 @@ public:
  }

  _LIBCPP_HIDE_FROM_ABI basic_string_view<char_type, traits_type> view() const noexcept;
-#endif // _LIBCPP_STD_VER >= 20
+# endif // _LIBCPP_STD_VER >= 20

  void str(const string_type& __s) {
    __str_ = __s;
    __init_buf_ptrs();
  }

-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
  template <class _SAlloc>
    requires(!is_same_v<_SAlloc, allocator_type>)
  _LIBCPP_HIDE_FROM_ABI void str(const basic_string<char_type, traits_type, _SAlloc>& __s) {
@@ -501,9 +504,9 @@ public:
    __str_ = std::move(__s);
    __init_buf_ptrs();
  }
-#endif // _LIBCPP_STD_VER >= 20
+# endif // _LIBCPP_STD_VER >= 20

-#if _LIBCPP_STD_VER >= 26
+# if _LIBCPP_STD_VER >= 26

  template <class _Tp>
    requires is_convertible_v<const _Tp&, basic_string_view<_CharT, _Traits>>
@@ -513,7 +516,7 @@ public:
    __init_buf_ptrs();
  }

-#endif // _LIBCPP_STD_VER >= 26
+# endif // _LIBCPP_STD_VER >= 26

protected:
  // [stringbuf.virtuals] Overridden virtual functions:
@@ -609,10 +612,10 @@ basic_stringbuf<_CharT, _Traits, _Allocator>::operator=(basic_stringbuf&& __rhs)
template <class _CharT, class _Traits, class _Allocator>
void basic_stringbuf<_CharT, _Traits, _Allocator>::swap(basic_stringbuf& __rhs)
-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
    noexcept(allocator_traits<_Allocator>::propagate_on_container_swap::value ||
             allocator_traits<_Allocator>::is_always_equal::value)
-#endif
+# endif
{
  char_type* __p = const_cast<char_type*>(__rhs.__str_.data());
  ptrdiff_t __rbinp = -1;
@@ -682,14 +685,14 @@ void basic_stringbuf<_CharT, _Traits, _Allocator>::swap(basic_stringbuf& __rhs)
template <class _CharT, class _Traits, class _Allocator>
inline _LIBCPP_HIDE_FROM_ABI void
swap(basic_stringbuf<_CharT, _Traits, _Allocator>& __x, basic_stringbuf<_CharT, _Traits, _Allocator>& __y)
-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
    noexcept(noexcept(__x.swap(__y)))
-#endif
+# endif
{
  __x.swap(__y);
}

-#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
+# if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
template <class _CharT, class _Traits, class _Allocator>
basic_string<_CharT, _Traits, _Allocator> basic_stringbuf<_CharT, _Traits, _Allocator>::str() const {
  if (__mode_ & ios_base::out) {
@@ -700,7 +703,7 @@ basic_string<_CharT, _Traits, _Allocator> basic_stringbuf<_CharT, _Traits, _Allo
    return string_type(this->eback(), this->egptr(), __str_.get_allocator());
  return string_type(__str_.get_allocator());
}
-#endif // _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
+# endif // _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)

template <class _CharT, class _Traits, class _Allocator>
_LIBCPP_HIDE_FROM_ABI void basic_stringbuf<_CharT, _Traits, _Allocator>::__init_buf_ptrs() {
@@ -726,7 +729,7 @@ _LIBCPP_HIDE_FROM_ABI void basic_stringbuf<_CharT, _Traits, _Allocator>::__init_
  }
}

-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
template <class _CharT, class _Traits, class _Allocator>
_LIBCPP_HIDE_FROM_ABI basic_string_view<_CharT, _Traits>
basic_stringbuf<_CharT, _Traits, _Allocator>::view() const noexcept {
@@ -738,7 +741,7 @@ basic_stringbuf<_CharT, _Traits, _Allocator>::view() const noexcept {
    return basic_string_view<_CharT, _Traits>(this->eback(), this->egptr());
  return basic_string_view<_CharT, _Traits>();
}
-#endif // _LIBCPP_STD_VER >= 20
+# endif // _LIBCPP_STD_VER >= 20

template <class _CharT, class _Traits, class _Allocator>
typename basic_stringbuf<_CharT, _Traits, _Allocator>::int_type
@@ -781,9 +784,9 @@ basic_stringbuf<_CharT, _Traits, _Allocator>::overflow(int_type __c) {
  if (this->pptr() == this->epptr()) {
    if (!(__mode_ & ios_base::out))
      return traits_type::eof();
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    try {
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
      ptrdiff_t __nout = this->pptr() - this->pbase();
      ptrdiff_t __hm = __hm_ - this->pbase();
      __str_.push_back(char_type());
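The `_LIBCPP_STD_VER >= 20` regions being re-indented above carry the allocator-aware `str(alloc)` overloads, the move-enabled `str() &&`, and the zero-copy `view()` observer. Their user-visible behaviour, sketched briefly (guarded so the example still builds as C++17):

    #include <cassert>
    #include <sstream>
    #include <string>
    #include <string_view>

    int main() {
      std::stringstream ss;
      ss << "hello";
    #if __cplusplus >= 202002L
      std::string_view v = ss.view();  // no copy; valid only while ss is alive and unmodified
      assert(v == "hello");
      std::string owned = std::move(ss).str();  // C++20 rvalue overload moves the buffer out
      assert(owned == "hello");
    #endif
    }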
@@ -792,11 +795,11 @@ basic_stringbuf<_CharT, _Traits, _Allocator>::overflow(int_type __c) {
      this->setp(__p, __p + __str_.size());
      this->__pbump(__nout);
      __hm_ = this->pbase() + __hm;
-#ifndef _LIBCPP_HAS_NO_EXCEPTIONS
+# ifndef _LIBCPP_HAS_NO_EXCEPTIONS
    } catch (...) {
      return traits_type::eof();
    }
-#endif // _LIBCPP_HAS_NO_EXCEPTIONS
+# endif // _LIBCPP_HAS_NO_EXCEPTIONS
  }
  __hm_ = std::max(this->pptr() + 1, __hm_);
  if (__mode_ & ios_base::in) {
@@ -881,7 +884,7 @@ public:
  _LIBCPP_HIDE_FROM_ABI explicit basic_istringstream(const string_type& __s, ios_base::openmode __wch = ios_base::in)
      : basic_istream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__s, __wch | ios_base::in) {}

-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
  _LIBCPP_HIDE_FROM_ABI basic_istringstream(ios_base::openmode __wch, const _Allocator& __a)
      : basic_istream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__wch | ios_base::in, __a) {}

@@ -901,9 +904,9 @@ public:
  _LIBCPP_HIDE_FROM_ABI explicit basic_istringstream(const basic_string<_CharT, _Traits, _SAlloc>& __s,
                                                     ios_base::openmode __wch = ios_base::in)
      : basic_istream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__s, __wch | ios_base::in) {}
-#endif // _LIBCPP_STD_VER >= 20
+# endif // _LIBCPP_STD_VER >= 20

-#if _LIBCPP_STD_VER >= 26
+# if _LIBCPP_STD_VER >= 26

  template <class _Tp>
    requires is_convertible_v<const _Tp&, basic_string_view<_CharT, _Traits>>
@@ -920,7 +923,7 @@ public:
  _LIBCPP_HIDE_FROM_ABI basic_istringstream(const _Tp& __t, ios_base::openmode __which, const _Allocator& __a)
      : basic_istream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__t, __which | ios_base::in, __a) {}

-#endif // _LIBCPP_STD_VER >= 26
+# endif // _LIBCPP_STD_VER >= 26

  basic_istringstream(const basic_istringstream&) = delete;
  _LIBCPP_HIDE_FROM_ABI basic_istringstream(basic_istringstream&& __rhs)
@@ -945,15 +948,15 @@ public:
    return const_cast<basic_stringbuf<char_type, traits_type, allocator_type>*>(std::addressof(__sb_));
  }

-#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
+# if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
  _LIBCPP_HIDE_FROM_ABI string_type str() const { return __sb_.str(); }
-#else
+# else
  _LIBCPP_HIDE_FROM_ABI string_type str() const& { return __sb_.str(); }

  _LIBCPP_HIDE_FROM_ABI string_type str() && { return std::move(__sb_).str(); }
-#endif
+# endif

-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
  template <class _SAlloc>
    requires __is_allocator<_SAlloc>::value
  _LIBCPP_HIDE_FROM_ABI basic_string<char_type, traits_type, _SAlloc> str(const _SAlloc& __sa) const {
@@ -961,26 +964,26 @@ public:
  }

  _LIBCPP_HIDE_FROM_ABI basic_string_view<char_type, traits_type> view() const noexcept { return __sb_.view(); }
-#endif // _LIBCPP_STD_VER >= 20
+# endif // _LIBCPP_STD_VER >= 20

  _LIBCPP_HIDE_FROM_ABI void str(const string_type& __s) { __sb_.str(__s); }

-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
  template <class _SAlloc>
  _LIBCPP_HIDE_FROM_ABI void str(const basic_string<char_type, traits_type, _SAlloc>& __s) {
    __sb_.str(__s);
  }

  _LIBCPP_HIDE_FROM_ABI void str(string_type&& __s) { __sb_.str(std::move(__s)); }
-#endif // _LIBCPP_STD_VER >= 20
+# endif // _LIBCPP_STD_VER >= 20

-#if _LIBCPP_STD_VER >= 26
+# if _LIBCPP_STD_VER >= 26
  template <class _Tp>
    requires is_convertible_v<const _Tp&, basic_string_view<_CharT, _Traits>>
  _LIBCPP_HIDE_FROM_ABI void str(const _Tp& __t) {
    rdbuf()->str(__t);
  }
-#endif // _LIBCPP_STD_VER >= 26
+# endif // _LIBCPP_STD_VER >= 26
};

template <class _CharT, class _Traits, class _Allocator>
@@ -1017,7 +1020,7 @@ public:
  _LIBCPP_HIDE_FROM_ABI explicit basic_ostringstream(const string_type& __s, ios_base::openmode __wch = ios_base::out)
      : basic_ostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__s, __wch | ios_base::out) {}

-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
  _LIBCPP_HIDE_FROM_ABI basic_ostringstream(ios_base::openmode __wch, const _Allocator& __a)
      : basic_ostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__wch | ios_base::out, __a) {}

@@ -1038,9 +1041,9 @@ public:
  _LIBCPP_HIDE_FROM_ABI explicit basic_ostringstream(const basic_string<_CharT, _Traits, _SAlloc>& __s,
                                                     ios_base::openmode __wch = ios_base::out)
      : basic_ostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__s, __wch | ios_base::out) {}
-#endif // _LIBCPP_STD_VER >= 20
+# endif // _LIBCPP_STD_VER >= 20

-#if _LIBCPP_STD_VER >= 26
+# if _LIBCPP_STD_VER >= 26

  template <class _Tp>
    requires is_convertible_v<const _Tp&, basic_string_view<_CharT, _Traits>>
@@ -1057,7 +1060,7 @@ public:
  _LIBCPP_HIDE_FROM_ABI basic_ostringstream(const _Tp& __t, ios_base::openmode __which, const _Allocator& __a)
      : basic_ostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__t, __which | ios_base::out, __a) {}

-#endif // _LIBCPP_STD_VER >= 26
+# endif // _LIBCPP_STD_VER >= 26

  basic_ostringstream(const basic_ostringstream&) = delete;
  _LIBCPP_HIDE_FROM_ABI basic_ostringstream(basic_ostringstream&& __rhs)
@@ -1083,15 +1086,15 @@ public:
    return const_cast<basic_stringbuf<char_type, traits_type, allocator_type>*>(std::addressof(__sb_));
  }

-#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
+# if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
  _LIBCPP_HIDE_FROM_ABI string_type str() const { return __sb_.str(); }
-#else
+# else
  _LIBCPP_HIDE_FROM_ABI string_type str() const& { return __sb_.str(); }

  _LIBCPP_HIDE_FROM_ABI string_type str() && { return std::move(__sb_).str(); }
-#endif
+# endif

-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
  template <class _SAlloc>
    requires __is_allocator<_SAlloc>::value
  _LIBCPP_HIDE_FROM_ABI basic_string<char_type, traits_type, _SAlloc> str(const _SAlloc& __sa) const {
@@ -1099,26 +1102,26 @@ public:
  }

  _LIBCPP_HIDE_FROM_ABI basic_string_view<char_type, traits_type> view() const noexcept { return __sb_.view(); }
-#endif // _LIBCPP_STD_VER >= 20
+# endif // _LIBCPP_STD_VER >= 20

  _LIBCPP_HIDE_FROM_ABI void str(const string_type& __s) { __sb_.str(__s); }

-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
  template <class _SAlloc>
  _LIBCPP_HIDE_FROM_ABI void str(const basic_string<char_type, traits_type, _SAlloc>& __s) {
    __sb_.str(__s);
  }

  _LIBCPP_HIDE_FROM_ABI void str(string_type&& __s) { __sb_.str(std::move(__s)); }
-#endif // _LIBCPP_STD_VER >= 20
+# endif // _LIBCPP_STD_VER >= 20

-#if _LIBCPP_STD_VER >= 26
+# if _LIBCPP_STD_VER >= 26
  template <class _Tp>
    requires is_convertible_v<const _Tp&, basic_string_view<_CharT, _Traits>>
  _LIBCPP_HIDE_FROM_ABI void str(const _Tp& __t) {
    rdbuf()->str(__t);
  }
-#endif // _LIBCPP_STD_VER >= 26
+# endif // _LIBCPP_STD_VER >= 26
};

template <class _CharT, class _Traits, class _Allocator>
@@ -1156,7 +1159,7 @@ public:
                                                  ios_base::openmode __wch = ios_base::in | ios_base::out)
      : basic_iostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__s, __wch) {}

-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
  _LIBCPP_HIDE_FROM_ABI basic_stringstream(ios_base::openmode __wch, const _Allocator& __a)
      : basic_iostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__wch, __a) {}

@@ -1178,9 +1181,9 @@ public:
  _LIBCPP_HIDE_FROM_ABI explicit basic_stringstream(const basic_string<_CharT, _Traits, _SAlloc>& __s,
                                                    ios_base::openmode __wch = ios_base::out | ios_base::in)
      : basic_iostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__s, __wch) {}
-#endif // _LIBCPP_STD_VER >= 20
+# endif // _LIBCPP_STD_VER >= 20

-#if _LIBCPP_STD_VER >= 26
+# if _LIBCPP_STD_VER >= 26

  template <class _Tp>
    requires is_convertible_v<const _Tp&, basic_string_view<_CharT, _Traits>>
@@ -1198,7 +1201,7 @@ public:
  _LIBCPP_HIDE_FROM_ABI basic_stringstream(const _Tp& __t, ios_base::openmode __which, const _Allocator& __a)
      : basic_iostream<_CharT, _Traits>(std::addressof(__sb_)), __sb_(__t, __which, __a) {}

-#endif // _LIBCPP_STD_VER >= 26
+# endif // _LIBCPP_STD_VER >= 26

  basic_stringstream(const basic_stringstream&) = delete;
  _LIBCPP_HIDE_FROM_ABI basic_stringstream(basic_stringstream&& __rhs)
@@ -1223,15 +1226,15 @@ public:
    return const_cast<basic_stringbuf<char_type, traits_type, allocator_type>*>(std::addressof(__sb_));
  }

-#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
+# if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_BUILDING_LIBRARY)
  _LIBCPP_HIDE_FROM_ABI string_type str() const { return __sb_.str(); }
-#else
+# else
  _LIBCPP_HIDE_FROM_ABI string_type str() const& { return __sb_.str(); }

  _LIBCPP_HIDE_FROM_ABI string_type str() && { return std::move(__sb_).str(); }
-#endif
+# endif

-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
  template <class _SAlloc>
    requires __is_allocator<_SAlloc>::value
  _LIBCPP_HIDE_FROM_ABI basic_string<char_type, traits_type, _SAlloc> str(const _SAlloc& __sa) const {
@@ -1239,26 +1242,26 @@ public:
  }

  _LIBCPP_HIDE_FROM_ABI basic_string_view<char_type, traits_type> view() const noexcept { return __sb_.view(); }
-#endif // _LIBCPP_STD_VER >= 20
+# endif // _LIBCPP_STD_VER >= 20

  _LIBCPP_HIDE_FROM_ABI void str(const string_type& __s) { __sb_.str(__s); }

-#if _LIBCPP_STD_VER >= 20
+# if _LIBCPP_STD_VER >= 20
  template <class _SAlloc>
  _LIBCPP_HIDE_FROM_ABI void str(const basic_string<char_type, traits_type, _SAlloc>& __s) {
    __sb_.str(__s);
  }

  _LIBCPP_HIDE_FROM_ABI void str(string_type&& __s) { __sb_.str(std::move(__s)); }
-#endif // _LIBCPP_STD_VER >= 20
+# endif // _LIBCPP_STD_VER >= 20

-#if _LIBCPP_STD_VER >= 26
+# if _LIBCPP_STD_VER >= 26
  template <class _Tp>
    requires is_convertible_v<const _Tp&, basic_string_view<_CharT, _Traits>>
  _LIBCPP_HIDE_FROM_ABI void str(const _Tp& __t) {
    rdbuf()->str(__t);
  }
-#endif // _LIBCPP_STD_VER >= 26
+# endif // _LIBCPP_STD_VER >= 26
};

template <class _CharT, class _Traits, class _Allocator>
@@ -1267,17 +1270,19 @@ swap(basic_stringstream<_CharT, _Traits, _Allocator>& __x, basic_stringstream<_C
  __x.swap(__y);
}

-#if _LIBCPP_AVAILABILITY_HAS_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1
+# if _LIBCPP_AVAILABILITY_HAS_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1
extern template class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_stringbuf<char>;
extern template class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_stringstream<char>;
extern template class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_ostringstream<char>;
extern template class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_istringstream<char>;
-#endif
+# endif

_LIBCPP_END_NAMESPACE_STD

_LIBCPP_POP_MACROS

+#endif // !_LIBCPP_HAS_NO_LOCALIZATION
+
#if _LIBCPP_STD_VER <= 20 && !defined(_LIBCPP_REMOVE_TRANSITIVE_INCLUDES)
#  include <ostream>
#  include <type_traits>
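sstream above and streambuf below receive the same structural change as ostream: every include, pragma, and declaration moves one `#` level deeper under `_LIBCPP_HAS_NO_LOCALIZATION`, while the transitive-include compatibility block stays outside the guard. A compilable miniature of that guard structure (macro names are hypothetical stand-ins for the _LIBCPP_* ones):

    #include <cstdio>

    // #define MY_NO_LOCALIZATION  // a build configuration without localization support
    #if !defined(MY_NO_LOCALIZATION)
    #  define HAVE_STREAMS 1
    // ... here the real headers pull in <__locale>, <ios>, and the stream classes ...
    #else
    #  define HAVE_STREAMS 0
    #endif

    int main() { std::printf("streams available: %d\n", HAVE_STREAMS); }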
diff --git a/libcxx/include/streambuf b/libcxx/include/streambuf
index 5a3c17e..906340e 100644
--- a/libcxx/include/streambuf
+++ b/libcxx/include/streambuf
@@ -107,23 +107,26 @@ protected:
*/

-#include <__assert>
#include <__config>
-#include <__fwd/streambuf.h>
-#include <__locale>
-#include <__type_traits/is_same.h>
-#include <__utility/is_valid_range.h>
-#include <climits>
-#include <ios>
-#include <iosfwd>
-#include <version>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-#  pragma GCC system_header
-#endif
+
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+
+# include <__assert>
+# include <__fwd/streambuf.h>
+# include <__locale>
+# include <__type_traits/is_same.h>
+# include <__utility/is_valid_range.h>
+# include <climits>
+# include <ios>
+# include <iosfwd>
+# include <version>
+
+# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#  pragma GCC system_header
+# endif

_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
+# include <__undef_macros>

_LIBCPP_BEGIN_NAMESPACE_STD

@@ -430,14 +433,16 @@ typename basic_streambuf<_CharT, _Traits>::int_type basic_streambuf<_CharT, _Tra

extern template class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_streambuf<char>;

-#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
extern template class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_streambuf<wchar_t>;
-#endif
+# endif

_LIBCPP_END_NAMESPACE_STD

_LIBCPP_POP_MACROS

+#endif // !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+
#if !defined(_LIBCPP_REMOVE_TRANSITIVE_INCLUDES) && _LIBCPP_STD_VER <= 20
#  include <cstdint>
#endif
diff --git a/libcxx/include/string b/libcxx/include/string
index e8c9bce..7635902 100644
--- a/libcxx/include/string
+++ b/libcxx/include/string
@@ -918,18 +918,18 @@ private:
    __long __l;
  };

-  __compressed_pair<__rep, allocator_type> __r_;
+  _LIBCPP_COMPRESSED_PAIR(__rep, __rep_, allocator_type, __alloc_);

  // Construct a string with the given allocator and enough storage to hold `__size` characters, but
  // don't initialize the characters. The contents of the string, including the null terminator, must be
  // initialized separately.
  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit basic_string(
      __uninitialized_size_tag, size_type __size, const allocator_type& __a)
-      : __r_(__default_init_tag(), __a) {
+      : __alloc_(__a) {
    if (__size > max_size())
      __throw_length_error();
    if (__fits_in_sso(__size)) {
-      __r_.first() = __rep();
+      __rep_ = __rep();
      __set_short_size(__size);
    } else {
      auto __capacity = __recommend(__size) + 1;
@@ -945,7 +945,7 @@ private:
  template <class _Iter, class _Sent>
  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20
  basic_string(__init_with_sentinel_tag, _Iter __first, _Sent __last, const allocator_type& __a)
-      : __r_(__default_init_tag(), __a) {
+      : __alloc_(__a) {
    __init_with_sentinel(std::move(__first), std::move(__last));
  }

@@ -983,7 +983,7 @@ public:

  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string()
      _NOEXCEPT_(is_nothrow_default_constructible<allocator_type>::value)
-      : __r_(__value_init_tag(), __default_init_tag()) {
+      : __rep_() {
    __annotate_new(0);
  }

@@ -993,14 +993,14 @@ public:
#else
      _NOEXCEPT
#endif
-      : __r_(__value_init_tag(), __a) {
+      : __rep_(), __alloc_(__a) {
    __annotate_new(0);
  }

  _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_STRING_INTERNAL_MEMORY_ACCESS basic_string(const basic_string& __str)
-      : __r_(__default_init_tag(), __alloc_traits::select_on_container_copy_construction(__str.__alloc())) {
+      : __alloc_(__alloc_traits::select_on_container_copy_construction(__str.__alloc())) {
    if (!__str.__is_long()) {
-      __r_.first() = __str.__r_.first();
+      __rep_ = __str.__rep_;
      __annotate_new(__get_short_size());
    } else
      __init_copy_ctor_external(std::__to_address(__str.__get_long_pointer()), __str.__get_long_size());
@@ -1008,9 +1008,9 @@ public:

  _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_STRING_INTERNAL_MEMORY_ACCESS
  basic_string(const basic_string& __str, const allocator_type& __a)
-      : __r_(__default_init_tag(), __a) {
+      : __alloc_(__a) {
    if (!__str.__is_long()) {
-      __r_.first() = __str.__r_.first();
+      __rep_ = __str.__rep_;
      __annotate_new(__get_short_size());
    } else
      __init_copy_ctor_external(std::__to_address(__str.__get_long_pointer()), __str.__get_long_size());
@@ -1026,28 +1026,29 @@ public:
      // Turning off ASan instrumentation for variable initialization with _LIBCPP_STRING_INTERNAL_MEMORY_ACCESS
      // does not work consistently during initialization of __r_, so we instead unpoison __str's memory manually first.
      // __str's memory needs to be unpoisoned only in the case where it's a short string.
-      : __r_([](basic_string& __s) -> decltype(__s.__r_)&& {
+      : __rep_([](basic_string& __s) -> decltype(__s.__rep_)&& {
          if (!__s.__is_long())
            __s.__annotate_delete();
-          return std::move(__s.__r_);
-        }(__str)) {
-    __str.__r_.first() = __rep();
+          return std::move(__s.__rep_);
+        }(__str)),
+        __alloc_(std::move(__str.__alloc_)) {
+    __str.__rep_ = __rep();
    __str.__annotate_new(0);
    if (!__is_long())
      __annotate_new(size());
  }

  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(basic_string&& __str, const allocator_type& __a)
-      : __r_(__default_init_tag(), __a) {
+      : __alloc_(__a) {
    if (__str.__is_long() && __a != __str.__alloc()) // copy, not move
      __init(std::__to_address(__str.__get_long_pointer()), __str.__get_long_size());
    else {
      if (__libcpp_is_constant_evaluated())
-        __r_.first() = __rep();
+        __rep_ = __rep();
      if (!__str.__is_long())
        __str.__annotate_delete();
-      __r_.first() = __str.__r_.first();
-      __str.__r_.first() = __rep();
+      __rep_ = __str.__rep_;
+      __str.__rep_ = __rep();
      __str.__annotate_new(0);
      if (!__is_long() && this != std::addressof(__str))
        __annotate_new(size());
@@ -1056,15 +1057,14 @@ public:
#endif // _LIBCPP_CXX03_LANG

  template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0>
-  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(const _CharT* __s)
-      : __r_(__default_init_tag(), __default_init_tag()) {
+  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(const _CharT* __s) {
    _LIBCPP_ASSERT_NON_NULL(__s != nullptr, "basic_string(const char*) detected nullptr");
    __init(__s, traits_type::length(__s));
  }

  template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0>
  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(const _CharT* __s, const _Allocator& __a)
-      : __r_(__default_init_tag(), __a) {
+      : __alloc_(__a) {
    _LIBCPP_ASSERT_NON_NULL(__s != nullptr, "basic_string(const char*, allocator) detected nullptr");
    __init(__s, traits_type::length(__s));
  }
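`_LIBCPP_COMPRESSED_PAIR(__rep, __rep_, allocator_type, __alloc_)` dissolves the old `__compressed_pair<__rep, allocator_type> __r_` member into two named members; the empty-allocator space optimization that `__compressed_pair` achieved through inheritance is now expressed with `[[no_unique_address]]`, which is why every `__r_.first()`/`__r_.second()` access becomes a plain `__rep_`/`__alloc_`. A self-contained illustration of the layout property (the printed sizes are typical, not guaranteed):

    #include <cstdio>
    #include <memory>

    struct Rep { char buf[24]; };  // stand-in for the string's SSO representation

    template <class Alloc>
    struct StringLayout {
      Rep rep;
      [[no_unique_address]] Alloc alloc;  // occupies no storage when Alloc is empty
    };

    int main() {
      std::printf("%zu vs %zu\n",
                  sizeof(StringLayout<std::allocator<char>>),  // typically 24
                  sizeof(Rep));                                // 24
    }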
@@ -1073,23 +1073,19 @@ public:
  basic_string(nullptr_t) = delete;
#endif

-  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(const _CharT* __s, size_type __n)
-      : __r_(__default_init_tag(), __default_init_tag()) {
+  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(const _CharT* __s, size_type __n) {
    _LIBCPP_ASSERT_NON_NULL(__n == 0 || __s != nullptr, "basic_string(const char*, n) detected nullptr");
    __init(__s, __n);
  }

  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20
  basic_string(const _CharT* __s, size_type __n, const _Allocator& __a)
-      : __r_(__default_init_tag(), __a) {
+      : __alloc_(__a) {
    _LIBCPP_ASSERT_NON_NULL(__n == 0 || __s != nullptr, "basic_string(const char*, n, allocator) detected nullptr");
    __init(__s, __n);
  }

-  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(size_type __n, _CharT __c)
-      : __r_(__default_init_tag(), __default_init_tag()) {
-    __init(__n, __c);
-  }
+  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(size_type __n, _CharT __c) { __init(__n, __c); }

#if _LIBCPP_STD_VER >= 23
  _LIBCPP_HIDE_FROM_ABI constexpr basic_string(
@@ -1098,7 +1094,7 @@ public:
  _LIBCPP_HIDE_FROM_ABI constexpr basic_string(
      basic_string&& __str, size_type __pos, size_type __n, const _Allocator& __alloc = _Allocator())
-      : __r_(__default_init_tag(), __alloc) {
+      : __alloc_(__alloc) {
    if (__pos > __str.size())
      __throw_out_of_range();

@@ -1114,13 +1110,13 @@ public:

  template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0>
  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(size_type __n, _CharT __c, const _Allocator& __a)
-      : __r_(__default_init_tag(), __a) {
+      : __alloc_(__a) {
    __init(__n, __c);
  }

  _LIBCPP_CONSTEXPR_SINCE_CXX20
  basic_string(const basic_string& __str, size_type __pos, size_type __n, const _Allocator& __a = _Allocator())
-      : __r_(__default_init_tag(), __a) {
+      : __alloc_(__a) {
    size_type __str_sz = __str.size();
    if (__pos > __str_sz)
      __throw_out_of_range();
@@ -1129,7 +1125,7 @@ public:

  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20
  basic_string(const basic_string& __str, size_type __pos, const _Allocator& __a = _Allocator())
-      : __r_(__default_init_tag(), __a) {
+      : __alloc_(__a) {
    size_type __str_sz = __str.size();
    if (__pos > __str_sz)
      __throw_out_of_range();
@@ -1142,7 +1138,7 @@ public:
            int> = 0>
  _LIBCPP_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS _LIBCPP_CONSTEXPR_SINCE_CXX20
  basic_string(const _Tp& __t, size_type __pos, size_type __n, const allocator_type& __a = allocator_type())
-      : __r_(__default_init_tag(), __a) {
+      : __alloc_(__a) {
    __self_view __sv0 = __t;
    __self_view __sv = __sv0.substr(__pos, __n);
    __init(__sv.data(), __sv.size());
@@ -1152,8 +1148,8 @@ public:
            __enable_if_t<__can_be_converted_to_string_view<_CharT, _Traits, _Tp>::value &&
                              !__is_same_uncvref<_Tp, basic_string>::value,
                          int> = 0>
-  _LIBCPP_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit basic_string(const _Tp& __t)
-      : __r_(__default_init_tag(), __default_init_tag()) {
+  _LIBCPP_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS
+  _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit basic_string(const _Tp& __t) {
    __self_view __sv = __t;
    __init(__sv.data(), __sv.size());
  }
@@ -1164,21 +1160,20 @@ public:
            int> = 0>
  _LIBCPP_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS _LIBCPP_CONSTEXPR_SINCE_CXX20
  explicit basic_string(const _Tp& __t, const allocator_type& __a)
-      : __r_(__default_init_tag(), __a) {
+      : __alloc_(__a) {
    __self_view __sv = __t;
    __init(__sv.data(), __sv.size());
  }

  template <class _InputIterator, __enable_if_t<__has_input_iterator_category<_InputIterator>::value, int> = 0>
-  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(_InputIterator __first, _InputIterator __last)
-      : __r_(__default_init_tag(), __default_init_tag()) {
+  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(_InputIterator __first, _InputIterator __last) {
    __init(__first, __last);
  }

  template <class _InputIterator, __enable_if_t<__has_input_iterator_category<_InputIterator>::value, int> = 0>
  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20
  basic_string(_InputIterator __first, _InputIterator __last, const allocator_type& __a)
-      : __r_(__default_init_tag(), __a) {
+      : __alloc_(__a) {
    __init(__first, __last);
  }

@@ -1186,7 +1181,7 @@ public:
  template <_ContainerCompatibleRange<_CharT> _Range>
  _LIBCPP_HIDE_FROM_ABI constexpr basic_string(
      from_range_t, _Range&& __range, const allocator_type& __a = allocator_type())
-      : __r_(__default_init_tag(), __a) {
+      : __alloc_(__a) {
    if constexpr (ranges::forward_range<_Range> || ranges::sized_range<_Range>) {
      __init_with_size(ranges::begin(__range), ranges::end(__range), ranges::distance(__range));
    } else {
@@ -1196,13 +1191,12 @@ public:
#endif

#ifndef _LIBCPP_CXX03_LANG
-  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(initializer_list<_CharT> __il)
-      : __r_(__default_init_tag(), __default_init_tag()) {
+  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(initializer_list<_CharT> __il) {
    __init(__il.begin(), __il.end());
  }

  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(initializer_list<_CharT> __il, const _Allocator& __a)
-      : __r_(__default_init_tag(), __a) {
+      : __alloc_(__a) {
    __init(__il.begin(), __il.end());
  }
#endif // _LIBCPP_CXX03_LANG
@@ -1467,8 +1461,8 @@ public:
    size_type __old_sz = __str.size();
    if (!__str.__is_long())
      __str.__annotate_delete();
-    __r_.first() = __str.__r_.first();
-    __str.__r_.first() = __rep();
+    __rep_ = __str.__rep_;
+    __str.__rep_ = __rep();
    __str.__annotate_new(0);

    _Traits::move(data(), data() + __pos, __len);
@@ -1875,10 +1869,10 @@ private:
  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_STRING_INTERNAL_MEMORY_ACCESS bool __is_long() const
      _NOEXCEPT {
-    if (__libcpp_is_constant_evaluated() && __builtin_constant_p(__r_.first().__l.__is_long_)) {
-      return __r_.first().__l.__is_long_;
+    if (__libcpp_is_constant_evaluated() && __builtin_constant_p(__rep_.__l.__is_long_)) {
+      return __rep_.__l.__is_long_;
    }
-    return __r_.first().__s.__is_long_;
+    return __rep_.__s.__is_long_;
  }

  static _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void __begin_lifetime(pointer __begin, size_type __n) {
@@ -1947,27 +1941,27 @@ private:
  _LIBCPP_CONSTEXPR_SINCE_CXX20 iterator
  __insert_with_size(const_iterator __pos, _Iterator __first, _Sentinel __last, size_type __n);

-  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 allocator_type& __alloc() _NOEXCEPT { return __r_.second(); }
-  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR const allocator_type& __alloc() const _NOEXCEPT { return __r_.second(); }
+  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 allocator_type& __alloc() _NOEXCEPT { return __alloc_; }
+  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR const allocator_type& __alloc() const _NOEXCEPT { return __alloc_; }

  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_STRING_INTERNAL_MEMORY_ACCESS void
  __set_short_size(size_type __s) _NOEXCEPT {
    _LIBCPP_ASSERT_INTERNAL(__s < __min_cap, "__s should never be greater than or equal to the short string capacity");
-    __r_.first().__s.__size_ = __s;
-    __r_.first().__s.__is_long_ = false;
+    __rep_.__s.__size_ = __s;
+    __rep_.__s.__is_long_ = false;
  }

  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_STRING_INTERNAL_MEMORY_ACCESS size_type
  __get_short_size() const _NOEXCEPT {
-    _LIBCPP_ASSERT_INTERNAL(!__r_.first().__s.__is_long_, "String has to be short when trying to get the short size");
-    return __r_.first().__s.__size_;
+    _LIBCPP_ASSERT_INTERNAL(!__rep_.__s.__is_long_, "String has to be short when trying to get the short size");
+    return __rep_.__s.__size_;
  }

  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void __set_long_size(size_type __s) _NOEXCEPT {
-    __r_.first().__l.__size_ = __s;
+    __rep_.__l.__size_ = __s;
  }
  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 size_type __get_long_size() const _NOEXCEPT {
-    return __r_.first().__l.__size_;
+    return __rep_.__l.__size_;
  }
  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void __set_size(size_type __s) _NOEXCEPT {
    if (__is_long())
@@
-1977,31 +1971,36 @@ private: } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void __set_long_cap(size_type __s) _NOEXCEPT { - __r_.first().__l.__cap_ = __s / __endian_factor; - __r_.first().__l.__is_long_ = true; + __rep_.__l.__cap_ = __s / __endian_factor; + __rep_.__l.__is_long_ = true; } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 size_type __get_long_cap() const _NOEXCEPT { - return __r_.first().__l.__cap_ * __endian_factor; + return __rep_.__l.__cap_ * __endian_factor; } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void __set_long_pointer(pointer __p) _NOEXCEPT { - __r_.first().__l.__data_ = __p; + __rep_.__l.__data_ = __p; } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pointer __get_long_pointer() _NOEXCEPT { - return _LIBCPP_ASAN_VOLATILE_WRAPPER(__r_.first().__l.__data_); + return _LIBCPP_ASAN_VOLATILE_WRAPPER(__rep_.__l.__data_); } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 const_pointer __get_long_pointer() const _NOEXCEPT { - return _LIBCPP_ASAN_VOLATILE_WRAPPER(__r_.first().__l.__data_); + return _LIBCPP_ASAN_VOLATILE_WRAPPER(__rep_.__l.__data_); } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_STRING_INTERNAL_MEMORY_ACCESS pointer __get_short_pointer() _NOEXCEPT { - return _LIBCPP_ASAN_VOLATILE_WRAPPER(pointer_traits<pointer>::pointer_to(__r_.first().__s.__data_[0])); + return _LIBCPP_ASAN_VOLATILE_WRAPPER(pointer_traits<pointer>::pointer_to(__rep_.__s.__data_[0])); } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_STRING_INTERNAL_MEMORY_ACCESS const_pointer __get_short_pointer() const _NOEXCEPT { - return _LIBCPP_ASAN_VOLATILE_WRAPPER(pointer_traits<const_pointer>::pointer_to(__r_.first().__s.__data_[0])); + return _LIBCPP_ASAN_VOLATILE_WRAPPER(pointer_traits<const_pointer>::pointer_to(__rep_.__s.__data_[0])); } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pointer __get_pointer() _NOEXCEPT { return __is_long() ? 
__get_long_pointer() : __get_short_pointer(); } @@ -2306,7 +2305,7 @@ template <class _CharT, class _Traits, class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 void basic_string<_CharT, _Traits, _Allocator>::__init(const value_type* __s, size_type __sz, size_type __reserve) { if (__libcpp_is_constant_evaluated()) - __r_.first() = __rep(); + __rep_ = __rep(); if (__reserve > max_size()) __throw_length_error(); pointer __p; @@ -2330,7 +2329,7 @@ template <class _CharT, class _Traits, class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 void basic_string<_CharT, _Traits, _Allocator>::__init(const value_type* __s, size_type __sz) { if (__libcpp_is_constant_evaluated()) - __r_.first() = __rep(); + __rep_ = __rep(); if (__sz > max_size()) __throw_length_error(); pointer __p; @@ -2354,7 +2353,7 @@ template <class _CharT, class _Traits, class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_NOINLINE void basic_string<_CharT, _Traits, _Allocator>::__init_copy_ctor_external(const value_type* __s, size_type __sz) { if (__libcpp_is_constant_evaluated()) - __r_.first() = __rep(); + __rep_ = __rep(); pointer __p; if (__fits_in_sso(__sz)) { @@ -2377,7 +2376,7 @@ basic_string<_CharT, _Traits, _Allocator>::__init_copy_ctor_external(const value template <class _CharT, class _Traits, class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 void basic_string<_CharT, _Traits, _Allocator>::__init(size_type __n, value_type __c) { if (__libcpp_is_constant_evaluated()) - __r_.first() = __rep(); + __rep_ = __rep(); if (__n > max_size()) __throw_length_error(); @@ -2409,7 +2408,7 @@ template <class _CharT, class _Traits, class _Allocator> template <class _InputIterator, class _Sentinel> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void basic_string<_CharT, _Traits, _Allocator>::__init_with_sentinel(_InputIterator __first, _Sentinel __last) { - __r_.first() = __rep(); + __rep_ = __rep(); __annotate_new(0); #ifndef _LIBCPP_HAS_NO_EXCEPTIONS @@ -2440,7 +2439,7 @@ template <class _InputIterator, class _Sentinel> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void basic_string<_CharT, _Traits, _Allocator>::__init_with_size(_InputIterator __first, _Sentinel __last, size_type __sz) { if (__libcpp_is_constant_evaluated()) - __r_.first() = __rep(); + __rep_ = __rep(); if (__sz > max_size()) __throw_length_error(); @@ -2660,7 +2659,7 @@ basic_string<_CharT, _Traits, _Allocator>::operator=(const basic_string& __str) size_type __old_size = __get_short_size(); if (__get_short_size() < __str.__get_short_size()) __annotate_increase(__str.__get_short_size() - __get_short_size()); - __r_.first() = __str.__r_.first(); + __rep_ = __str.__rep_; if (__old_size > __get_short_size()) __annotate_shrink(__old_size); } else { @@ -2708,7 +2707,7 @@ basic_string<_CharT, _Traits, _Allocator>::__move_assign(basic_string& __str, tr bool __str_was_short = !__str.__is_long(); __move_assign_alloc(__str); - __r_.first() = __str.__r_.first(); + __rep_ = __str.__rep_; __str.__set_short_size(0); traits_type::assign(__str.__get_short_pointer()[0], value_type()); @@ -3453,7 +3452,7 @@ inline _LIBCPP_CONSTEXPR_SINCE_CXX20 void basic_string<_CharT, _Traits, _Allocat __annotate_delete(); if (this != std::addressof(__str) && !__str.__is_long()) __str.__annotate_delete(); - std::swap(__r_.first(), __str.__r_.first()); + std::swap(__rep_, __str.__rep_); std::__swap_allocator(__alloc(), __str.__alloc()); if (!__is_long()) __annotate_new(__get_short_size()); @@ -3808,7 +3807,7 @@ inline _LIBCPP_CONSTEXPR_SINCE_CXX20 void basic_string<_CharT, _Traits, _Allocat if 
(__is_long()) { __annotate_delete(); __alloc_traits::deallocate(__alloc(), __get_long_pointer(), capacity() + 1); - __r_.first() = __rep(); + __rep_ = __rep(); } } diff --git a/libcxx/include/syncstream b/libcxx/include/syncstream index fea4c66..a2e1c7e 100644 --- a/libcxx/include/syncstream +++ b/libcxx/include/syncstream @@ -118,33 +118,36 @@ namespace std { */ #include <__config> -#include <__utility/move.h> -#include <ios> -#include <iosfwd> // required for declaration of default arguments -#include <streambuf> -#include <string> - -#ifndef _LIBCPP_HAS_NO_THREADS -# include <map> -# include <mutex> -# include <shared_mutex> -#endif + +#if !defined(_LIBCPP_HAS_NO_LOCALIZATION) + +# include <__utility/move.h> +# include <ios> +# include <iosfwd> // required for declaration of default arguments +# include <streambuf> +# include <string> + +# ifndef _LIBCPP_HAS_NO_THREADS +# include <map> +# include <mutex> +# include <shared_mutex> +# endif // standard-mandated includes // [syncstream.syn] -#include <ostream> +# include <ostream> -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif _LIBCPP_PUSH_MACROS -#include <__undef_macros> +# include <__undef_macros> _LIBCPP_BEGIN_NAMESPACE_STD -#if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_SYNCSTREAM) +# if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_SYNCSTREAM) // [syncstream.syncbuf.overview]/1 // Class template basic_syncbuf stores character data written to it, @@ -157,7 +160,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD // // This helper singleton is used to implement the required // synchronisation guarantees. -# ifndef _LIBCPP_HAS_NO_THREADS +# ifndef _LIBCPP_HAS_NO_THREADS class __wrapped_streambuf_mutex { _LIBCPP_HIDE_FROM_ABI __wrapped_streambuf_mutex() = default; @@ -230,7 +233,7 @@ private: return __it; } }; -# endif // _LIBCPP_HAS_NO_THREADS +# endif // _LIBCPP_HAS_NO_THREADS // basic_syncbuf @@ -270,14 +273,14 @@ public: } _LIBCPP_HIDE_FROM_ABI ~basic_syncbuf() { -# ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -# endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS emit(); -# ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { } -# endif // _LIBCPP_HAS_NO_EXCEPTIONS +# endif // _LIBCPP_HAS_NO_EXCEPTIONS __dec_reference(); } @@ -334,9 +337,9 @@ protected: return traits_type::not_eof(__c); if (this->pptr() == this->epptr()) { -# ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -# endif +# endif size_t __size = __str_.size(); __str_.resize(__str_.capacity() + 1); _LIBCPP_ASSERT_INTERNAL(__str_.size() > __size, "the buffer hasn't grown"); @@ -345,11 +348,11 @@ protected: this->setp(__p, __p + __str_.size()); this->pbump(__size); -# ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { return traits_type::eof(); } -# endif +# endif } return this->sputc(traits_type::to_char_type(__c)); @@ -361,7 +364,7 @@ private: // TODO Use a more generic buffer. // That buffer should be light with almost no additional headers. 
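The basic_string hunks above all follow one pattern: the old __compressed_pair<__rep, allocator_type> __r_, reached through __r_.first() and __r_.second(), becomes two named members __rep_ and __alloc_, and constructors that previously had to thread __default_init_tag() through the pair now initialize __alloc_ directly or omit the initializer entirely. A minimal C++20 sketch of the layout idea follows; rep, compressed_pair, and string_after are illustrative stand-ins, not the libc++ source, and the size assertions assume an Itanium-ABI compiler where [[no_unique_address]] is honored.

#include <cassert>
#include <memory>

struct rep { // stand-in for basic_string's short/long representation union
  void* data = nullptr;
  unsigned long size = 0;
  unsigned long cap = 0;
};

// Old shape: compressed_pair<rep, allocator> reached via first()/second().
template <class T1, class T2>
struct compressed_pair : private T2 { // empty-base optimization for T2
  T1 first_;
  T1& first() { return first_; }
  T2& second() { return *this; }
};

// New shape: two plain members; [[no_unique_address]] keeps an empty
// allocator at zero bytes without the wrapper indirection.
template <class Alloc>
struct string_after {
  rep rep_;
  [[no_unique_address]] Alloc alloc_;
};

int main() {
  compressed_pair<rep, std::allocator<char>> before;
  string_after<std::allocator<char>> after;
  assert(sizeof(before) == sizeof(rep)); // empty allocator costs nothing...
  assert(sizeof(after) == sizeof(rep));  // ...in either layout
  before.first().size = 1; // old accessor style, as in __r_.first()
  after.rep_.size = 1;     // new direct-member style, as in __rep_
  return 0;
}

Both layouts store a stateless allocator in zero bytes, so the common case stays layout-compatible; the gain is that the fields are now ordinary named members instead of first()/second() accessors.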
Then // it can be use here, the __retarget_buffer, and place that use - // the now deprecated get_temporary_buffer + // the now removed get_temporary_buffer basic_string<_CharT, _Traits, _Allocator> __str_; bool __emit_on_sync_{false}; @@ -370,9 +373,9 @@ private: if (!__wrapped_) return false; -# ifndef _LIBCPP_HAS_NO_THREADS +# ifndef _LIBCPP_HAS_NO_THREADS lock_guard<mutex> __lock = __wrapped_streambuf_mutex::__instance().__get_lock(__wrapped_); -# endif +# endif bool __result = true; if (this->pptr() != this->pbase()) { @@ -404,24 +407,24 @@ private: } _LIBCPP_HIDE_FROM_ABI void __inc_reference() { -# ifndef _LIBCPP_HAS_NO_THREADS +# ifndef _LIBCPP_HAS_NO_THREADS if (__wrapped_) __wrapped_streambuf_mutex::__instance().__inc_reference(__wrapped_); -# endif +# endif } _LIBCPP_HIDE_FROM_ABI void __dec_reference() noexcept { -# ifndef _LIBCPP_HAS_NO_THREADS +# ifndef _LIBCPP_HAS_NO_THREADS if (__wrapped_) __wrapped_streambuf_mutex::__instance().__dec_reference(__wrapped_); -# endif +# endif } }; using std::syncbuf; -# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS +# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS using std::wsyncbuf; -# endif +# endif // [syncstream.syncbuf.special], specialized algorithms template <class _CharT, class _Traits, class _Allocator> @@ -477,17 +480,17 @@ public: // TODO validate other unformatted output functions. typename basic_ostream<char_type, traits_type>::sentry __s(*this); if (__s) { -# ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS try { -# endif +# endif if (__sb_.emit() == false) this->setstate(ios::badbit); -# ifndef _LIBCPP_HAS_NO_EXCEPTIONS +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS } catch (...) { this->__set_badbit_and_consider_rethrow(); } -# endif +# endif } } @@ -502,14 +505,16 @@ private: }; using std::osyncstream; -# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS +# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS using std::wosyncstream; -# endif +# endif -#endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_SYNCSTREAM) +# endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_SYNCSTREAM) _LIBCPP_END_NAMESPACE_STD _LIBCPP_POP_MACROS +#endif // !defined(_LIBCPP_HAS_NO_LOCALIZATION) + #endif // _LIBCPP_SYNCSTREAM diff --git a/libcxx/include/vector b/libcxx/include/vector index 4720f8e..2a64849 100644 --- a/libcxx/include/vector +++ b/libcxx/include/vector @@ -434,7 +434,7 @@ public: #else noexcept #endif - : __end_cap_(nullptr, __a) { + : __alloc_(__a) { } _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI explicit vector(size_type __n) { @@ -448,7 +448,7 @@ public: #if _LIBCPP_STD_VER >= 14 _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI explicit vector(size_type __n, const allocator_type& __a) - : __end_cap_(nullptr, __a) { + : __alloc_(__a) { auto __guard = std::__make_exception_guard(__destroy_vector(*this)); if (__n > 0) { __vallocate(__n); @@ -470,7 +470,7 @@ public: template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0> _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI vector(size_type __n, const value_type& __x, const allocator_type& __a) - : __end_cap_(nullptr, __a) { + : __alloc_(__a) { if (__n > 0) { __vallocate(__n); __construct_at_end(__n, __x); @@ -508,7 +508,7 @@ public: template <_ContainerCompatibleRange<_Tp> _Range> _LIBCPP_HIDE_FROM_ABI constexpr vector( from_range_t, _Range&& __range, const allocator_type& __alloc = allocator_type()) - : __end_cap_(nullptr, __alloc) { + : __alloc_(__alloc) { if constexpr (ranges::forward_range<_Range> || ranges::sized_range<_Range>) { auto __n = 
static_cast<size_type>(ranges::distance(__range)); __init_with_size(ranges::begin(__range), ranges::end(__range), __n); @@ -764,8 +764,7 @@ public: private: pointer __begin_ = nullptr; pointer __end_ = nullptr; - __compressed_pair<pointer, allocator_type> __end_cap_ = - __compressed_pair<pointer, allocator_type>(nullptr, __default_init_tag()); + _LIBCPP_COMPRESSED_PAIR(pointer, __cap_ = nullptr, allocator_type, __alloc_); // Allocate space for __n objects // throws length_error if __n > max_size() @@ -961,17 +960,14 @@ private: ++__tx.__pos_; } - _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI allocator_type& __alloc() _NOEXCEPT { - return this->__end_cap_.second(); - } + // TODO: Remove these now redundant accessors + _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI allocator_type& __alloc() _NOEXCEPT { return this->__alloc_; } _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI const allocator_type& __alloc() const _NOEXCEPT { - return this->__end_cap_.second(); - } - _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI pointer& __end_cap() _NOEXCEPT { - return this->__end_cap_.first(); + return this->__alloc_; } + _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI pointer& __end_cap() _NOEXCEPT { return this->__cap_; } _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI const pointer& __end_cap() const _NOEXCEPT { - return this->__end_cap_.first(); + return this->__cap_; } _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI void __clear() _NOEXCEPT { @@ -1205,7 +1201,7 @@ template <class _InputIterator, int> > _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<_Tp, _Allocator>::vector(_InputIterator __first, _InputIterator __last, const allocator_type& __a) - : __end_cap_(nullptr, __a) { + : __alloc_(__a) { __init_with_sentinel(__first, __last); } @@ -1226,21 +1222,21 @@ template <class _ForwardIterator, int> > _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<_Tp, _Allocator>::vector(_ForwardIterator __first, _ForwardIterator __last, const allocator_type& __a) - : __end_cap_(nullptr, __a) { + : __alloc_(__a) { size_type __n = static_cast<size_type>(std::distance(__first, __last)); __init_with_size(__first, __last, __n); } template <class _Tp, class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<_Tp, _Allocator>::vector(const vector& __x) - : __end_cap_(nullptr, __alloc_traits::select_on_container_copy_construction(__x.__alloc())) { + : __alloc_(__alloc_traits::select_on_container_copy_construction(__x.__alloc())) { __init_with_size(__x.__begin_, __x.__end_, __x.size()); } template <class _Tp, class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<_Tp, _Allocator>::vector(const vector& __x, const __type_identity_t<allocator_type>& __a) - : __end_cap_(nullptr, __a) { + : __alloc_(__a) { __init_with_size(__x.__begin_, __x.__end_, __x.size()); } @@ -1251,7 +1247,7 @@ _LIBCPP_CONSTEXPR_SINCE_CXX20 inline _LIBCPP_HIDE_FROM_ABI vector<_Tp, _Allocato #else _NOEXCEPT_(is_nothrow_move_constructible<allocator_type>::value) #endif - : __end_cap_(nullptr, std::move(__x.__alloc())) { + : __alloc_(std::move(__x.__alloc())) { this->__begin_ = __x.__begin_; this->__end_ = __x.__end_; this->__end_cap() = __x.__end_cap(); @@ -1261,7 +1257,7 @@ _LIBCPP_CONSTEXPR_SINCE_CXX20 inline _LIBCPP_HIDE_FROM_ABI vector<_Tp, _Allocato template <class _Tp, class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 inline _LIBCPP_HIDE_FROM_ABI vector<_Tp, _Allocator>::vector(vector&& __x, const __type_identity_t<allocator_type>& __a) - : __end_cap_(nullptr, __a) { + : __alloc_(__a) { if (__a == __x.__alloc()) { this->__begin_ = __x.__begin_; 
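The vector side of the same refactor is visible in the hunk above: the __compressed_pair<pointer, allocator_type> __end_cap_ member becomes _LIBCPP_COMPRESSED_PAIR(pointer, __cap_ = nullptr, allocator_type, __alloc_), and every constructor's ": __end_cap_(nullptr, __a)" initializer shrinks to ": __alloc_(__a)". A rough C++20 guess at the shape such a macro can expand to — the real libc++ definition is more elaborate, and COMPRESSED_PAIR and vec_layout here are invented names:

#include <cassert>
#include <memory>

// Hypothetical approximation of a _LIBCPP_COMPRESSED_PAIR-style macro: it
// declares two adjacent members and lets [[no_unique_address]] drop empty ones.
#define COMPRESSED_PAIR(T1, name1, T2, name2)                                  \
  [[no_unique_address]] T1 name1;                                              \
  [[no_unique_address]] T2 name2

template <class T, class Allocator = std::allocator<T>>
struct vec_layout {
  T* begin_ = nullptr;
  T* end_ = nullptr;
  COMPRESSED_PAIR(T*, cap_ = nullptr, Allocator, alloc_);
};

int main() {
  // With a stateless allocator the tail is still a single pointer, matching
  // the old compressed_pair layout (Itanium ABI assumption).
  static_assert(sizeof(vec_layout<int>) == 3 * sizeof(int*), "layout kept");
  vec_layout<int> v;
  assert(v.cap_ == nullptr); // the pieces are now ordinary named members
  return 0;
}

This is also why the hunk adds "// TODO: Remove these now redundant accessors": __alloc() and __end_cap() survive only as one-line forwards to the named members.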
this->__end_ = __x.__end_; @@ -1291,7 +1287,7 @@ vector<_Tp, _Allocator>::vector(initializer_list<value_type> __il) { template <class _Tp, class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 inline _LIBCPP_HIDE_FROM_ABI vector<_Tp, _Allocator>::vector(initializer_list<value_type> __il, const allocator_type& __a) - : __end_cap_(nullptr, __a) { + : __alloc_(__a) { auto __guard = std::__make_exception_guard(__destroy_vector(*this)); if (__il.size() > 0) { __vallocate(__il.size()); @@ -1878,7 +1874,7 @@ private: __storage_pointer __begin_; size_type __size_; - __compressed_pair<size_type, __storage_allocator> __cap_alloc_; + _LIBCPP_COMPRESSED_PAIR(size_type, __cap_, __storage_allocator, __alloc_); public: typedef __bit_reference<vector> reference; @@ -1889,15 +1885,12 @@ public: #endif private: - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 size_type& __cap() _NOEXCEPT { return __cap_alloc_.first(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 const size_type& __cap() const _NOEXCEPT { - return __cap_alloc_.first(); - } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __storage_allocator& __alloc() _NOEXCEPT { - return __cap_alloc_.second(); - } + // TODO: Remove these now redundant accessors + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 size_type& __cap() _NOEXCEPT { return __cap_; } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 const size_type& __cap() const _NOEXCEPT { return __cap_; } + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __storage_allocator& __alloc() _NOEXCEPT { return __alloc_; } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 const __storage_allocator& __alloc() const _NOEXCEPT { - return __cap_alloc_.second(); + return __alloc_; } static const unsigned __bits_per_word = static_cast<unsigned>(sizeof(__storage_type) * CHAR_BIT); @@ -1960,7 +1953,7 @@ public: #if _LIBCPP_STD_VER >= 23 template <_ContainerCompatibleRange<bool> _Range> _LIBCPP_HIDE_FROM_ABI constexpr vector(from_range_t, _Range&& __range, const allocator_type& __a = allocator_type()) - : __begin_(nullptr), __size_(0), __cap_alloc_(0, static_cast<__storage_allocator>(__a)) { + : __begin_(nullptr), __size_(0), __cap_(0), __alloc_(static_cast<__storage_allocator>(__a)) { if constexpr (ranges::forward_range<_Range> || ranges::sized_range<_Range>) { auto __n = static_cast<size_type>(ranges::distance(__range)); __init_with_size(ranges::begin(__range), ranges::end(__range), __n); @@ -2365,7 +2358,7 @@ vector<bool, _Allocator>::__construct_at_end(_InputIterator __first, _Sentinel _ template <class _Allocator> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector() _NOEXCEPT_(is_nothrow_default_constructible<allocator_type>::value) - : __begin_(nullptr), __size_(0), __cap_alloc_(0, __default_init_tag()) {} + : __begin_(nullptr), __size_(0), __cap_(0) {} template <class _Allocator> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(const allocator_type& __a) @@ -2374,12 +2367,12 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocat #else _NOEXCEPT #endif - : __begin_(nullptr), __size_(0), __cap_alloc_(0, static_cast<__storage_allocator>(__a)) { + : __begin_(nullptr), __size_(0), __cap_(0), __alloc_(static_cast<__storage_allocator>(__a)) { } template <class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(size_type __n) - : __begin_(nullptr), __size_(0), __cap_alloc_(0, __default_init_tag()) { + : __begin_(nullptr), __size_(0), __cap_(0) { if 
(__n > 0) { __vallocate(__n); __construct_at_end(__n, false); @@ -2389,7 +2382,7 @@ _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(size_type __n) #if _LIBCPP_STD_VER >= 14 template <class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(size_type __n, const allocator_type& __a) - : __begin_(nullptr), __size_(0), __cap_alloc_(0, static_cast<__storage_allocator>(__a)) { + : __begin_(nullptr), __size_(0), __cap_(0), __alloc_(static_cast<__storage_allocator>(__a)) { if (__n > 0) { __vallocate(__n); __construct_at_end(__n, false); @@ -2399,7 +2392,7 @@ _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(size_type __n, co template <class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(size_type __n, const value_type& __x) - : __begin_(nullptr), __size_(0), __cap_alloc_(0, __default_init_tag()) { + : __begin_(nullptr), __size_(0), __cap_(0) { if (__n > 0) { __vallocate(__n); __construct_at_end(__n, __x); @@ -2409,7 +2402,7 @@ _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(size_type __n, co template <class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(size_type __n, const value_type& __x, const allocator_type& __a) - : __begin_(nullptr), __size_(0), __cap_alloc_(0, static_cast<__storage_allocator>(__a)) { + : __begin_(nullptr), __size_(0), __cap_(0), __alloc_(static_cast<__storage_allocator>(__a)) { if (__n > 0) { __vallocate(__n); __construct_at_end(__n, __x); @@ -2419,7 +2412,7 @@ vector<bool, _Allocator>::vector(size_type __n, const value_type& __x, const all template <class _Allocator> template <class _InputIterator, __enable_if_t<__has_exactly_input_iterator_category<_InputIterator>::value, int> > _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(_InputIterator __first, _InputIterator __last) - : __begin_(nullptr), __size_(0), __cap_alloc_(0, __default_init_tag()) { + : __begin_(nullptr), __size_(0), __cap_(0) { __init_with_sentinel(__first, __last); } @@ -2427,14 +2420,14 @@ template <class _Allocator> template <class _InputIterator, __enable_if_t<__has_exactly_input_iterator_category<_InputIterator>::value, int> > _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(_InputIterator __first, _InputIterator __last, const allocator_type& __a) - : __begin_(nullptr), __size_(0), __cap_alloc_(0, static_cast<__storage_allocator>(__a)) { + : __begin_(nullptr), __size_(0), __cap_(0), __alloc_(static_cast<__storage_allocator>(__a)) { __init_with_sentinel(__first, __last); } template <class _Allocator> template <class _ForwardIterator, __enable_if_t<__has_forward_iterator_category<_ForwardIterator>::value, int> > _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(_ForwardIterator __first, _ForwardIterator __last) - : __begin_(nullptr), __size_(0), __cap_alloc_(0, __default_init_tag()) { + : __begin_(nullptr), __size_(0), __cap_(0) { auto __n = static_cast<size_type>(std::distance(__first, __last)); __init_with_size(__first, __last, __n); } @@ -2443,7 +2436,7 @@ template <class _Allocator> template <class _ForwardIterator, __enable_if_t<__has_forward_iterator_category<_ForwardIterator>::value, int> > _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(_ForwardIterator __first, _ForwardIterator __last, const allocator_type& __a) - : __begin_(nullptr), __size_(0), __cap_alloc_(0, static_cast<__storage_allocator>(__a)) { + : __begin_(nullptr), __size_(0), __cap_(0), __alloc_(static_cast<__storage_allocator>(__a)) { auto __n = 
static_cast<size_type>(std::distance(__first, __last)); __init_with_size(__first, __last, __n); } @@ -2452,7 +2445,7 @@ vector<bool, _Allocator>::vector(_ForwardIterator __first, _ForwardIterator __la template <class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(initializer_list<value_type> __il) - : __begin_(nullptr), __size_(0), __cap_alloc_(0, __default_init_tag()) { + : __begin_(nullptr), __size_(0), __cap_(0) { size_type __n = static_cast<size_type>(__il.size()); if (__n > 0) { __vallocate(__n); @@ -2463,7 +2456,7 @@ _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(initializer_list< template <class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(initializer_list<value_type> __il, const allocator_type& __a) - : __begin_(nullptr), __size_(0), __cap_alloc_(0, static_cast<__storage_allocator>(__a)) { + : __begin_(nullptr), __size_(0), __cap_(0), __alloc_(static_cast<__storage_allocator>(__a)) { size_type __n = static_cast<size_type>(__il.size()); if (__n > 0) { __vallocate(__n); @@ -2477,7 +2470,8 @@ template <class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(const vector& __v) : __begin_(nullptr), __size_(0), - __cap_alloc_(0, __storage_traits::select_on_container_copy_construction(__v.__alloc())) { + __cap_(0), + __alloc_(__storage_traits::select_on_container_copy_construction(__v.__alloc())) { if (__v.size() > 0) { __vallocate(__v.size()); __construct_at_end(__v.begin(), __v.end(), __v.size()); @@ -2486,7 +2480,7 @@ _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(const vector& __v template <class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(const vector& __v, const allocator_type& __a) - : __begin_(nullptr), __size_(0), __cap_alloc_(0, __a) { + : __begin_(nullptr), __size_(0), __cap_(0), __alloc_(__a) { if (__v.size() > 0) { __vallocate(__v.size()); __construct_at_end(__v.begin(), __v.end(), __v.size()); @@ -2518,7 +2512,8 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocat #endif : __begin_(__v.__begin_), __size_(__v.__size_), - __cap_alloc_(std::move(__v.__cap_alloc_)) { + __cap_(__v.__cap_), + __alloc_(std::move(__v.__alloc_)) { __v.__begin_ = nullptr; __v.__size_ = 0; __v.__cap() = 0; @@ -2527,7 +2522,7 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocat template <class _Allocator> _LIBCPP_CONSTEXPR_SINCE_CXX20 vector<bool, _Allocator>::vector(vector&& __v, const __type_identity_t<allocator_type>& __a) - : __begin_(nullptr), __size_(0), __cap_alloc_(0, __a) { + : __begin_(nullptr), __size_(0), __cap_(0), __alloc_(__a) { if (__a == allocator_type(__v.__alloc())) { this->__begin_ = __v.__begin_; this->__size_ = __v.__size_; diff --git a/libcxx/include/wchar.h b/libcxx/include/wchar.h index d4268c6..c965b28 100644 --- a/libcxx/include/wchar.h +++ b/libcxx/include/wchar.h @@ -108,11 +108,6 @@ size_t wcsrtombs(char* restrict dst, const wchar_t** restrict src, size_t len, # include <__config> # include <stddef.h> -# if defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS) -# error \ - "The <wchar.h> header is not supported since libc++ has been configured with LIBCXX_ENABLE_WIDE_CHARACTERS disabled" -# endif - # if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header # endif @@ -142,7 +137,8 @@ size_t wcsrtombs(char* restrict dst, const wchar_t** restrict src, size_t len, # endif # endif -# if defined(__cplusplus) && !defined(_LIBCPP_WCHAR_H_HAS_CONST_OVERLOADS) && 
defined(_LIBCPP_PREFERRED_OVERLOAD) +# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS +# if defined(__cplusplus) && !defined(_LIBCPP_WCHAR_H_HAS_CONST_OVERLOADS) && defined(_LIBCPP_PREFERRED_OVERLOAD) extern "C++" { inline _LIBCPP_HIDE_FROM_ABI wchar_t* __libcpp_wcschr(const wchar_t* __s, wchar_t __c) { return (wchar_t*)wcschr(__s, __c); @@ -197,15 +193,16 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_PREFERRED_OVERLOAD wchar_t* wmemchr(wchar_t return __libcpp_wmemchr(__s, __c, __n); } } -# endif +# endif -# if defined(__cplusplus) && (defined(_LIBCPP_MSVCRT_LIKE) || defined(__MVS__)) +# if defined(__cplusplus) && (defined(_LIBCPP_MSVCRT_LIKE) || defined(__MVS__)) extern "C" { size_t mbsnrtowcs( wchar_t* __restrict __dst, const char** __restrict __src, size_t __nmc, size_t __len, mbstate_t* __restrict __ps); size_t wcsnrtombs( char* __restrict __dst, const wchar_t** __restrict __src, size_t __nwc, size_t __len, mbstate_t* __restrict __ps); } // extern "C" -# endif // __cplusplus && (_LIBCPP_MSVCRT || __MVS__) +# endif // __cplusplus && (_LIBCPP_MSVCRT || __MVS__) +# endif // !_LIBCPP_HAS_NO_WIDE_CHARACTERS #endif // _LIBCPP_WCHAR_H diff --git a/libcxx/include/wctype.h b/libcxx/include/wctype.h index c76ec5a3..9f378ea 100644 --- a/libcxx/include/wctype.h +++ b/libcxx/include/wctype.h @@ -46,11 +46,6 @@ wctrans_t wctrans(const char* property); #include <__config> -#if defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS) -# error \ - "The <wctype.h> header is not supported since libc++ has been configured with LIBCXX_ENABLE_WIDE_CHARACTERS disabled" -#endif - #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header #endif diff --git a/libcxx/modules/std.compat.cppm.in b/libcxx/modules/std.compat.cppm.in index 0f547a2..797b413 100644 --- a/libcxx/modules/std.compat.cppm.in +++ b/libcxx/modules/std.compat.cppm.in @@ -38,12 +38,8 @@ module; #include <cstring> #include <ctime> #include <cuchar> -#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS) -# include <cwchar> -#endif -#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS) -# include <cwctype> -#endif +#include <cwchar> +#include <cwctype> // *** Headers not yet available *** // diff --git a/libcxx/modules/std.cppm.in b/libcxx/modules/std.cppm.in index 653f993..0575774 100644 --- a/libcxx/modules/std.cppm.in +++ b/libcxx/modules/std.cppm.in @@ -57,12 +57,8 @@ module; #include <cstring> #include <ctime> #include <cuchar> -#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS) -# include <cwchar> -#endif -#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS) -# include <cwctype> -#endif +#include <cwchar> +#include <cwctype> #include <deque> #include <exception> #include <execution> diff --git a/libcxx/test/libcxx/containers/associative/unord.map/abi.compile.pass.cpp b/libcxx/test/libcxx/containers/associative/unord.map/abi.compile.pass.cpp index c8e5ba0..9147ca9 100644 --- a/libcxx/test/libcxx/containers/associative/unord.map/abi.compile.pass.cpp +++ b/libcxx/test/libcxx/containers/associative/unord.map/abi.compile.pass.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// -// UNSUPPORTED: libcpp-has-abi-fix-unordered-container-size-type +// UNSUPPORTED: libcpp-has-abi-fix-unordered-container-size-type, libcpp-abi-no-compressed-pair-padding #include <cstdint> #include <unordered_map> @@ -93,7 +93,8 @@ static_assert(TEST_ALIGNOF(unordered_map_alloc<char, final_small_iter_allocator< struct TEST_ALIGNAS(32) AlignedHash {}; struct UnalignedEqualTo {}; -static_assert(sizeof(std::unordered_map<int, int, AlignedHash, UnalignedEqualTo>) == 96, 
""); +// This part of the ABI has been broken between LLVM 19 and LLVM 20. +static_assert(sizeof(std::unordered_map<int, int, AlignedHash, UnalignedEqualTo>) == 64, ""); static_assert(TEST_ALIGNOF(std::unordered_map<int, int, AlignedHash, UnalignedEqualTo>) == 32, ""); #elif __SIZE_WIDTH__ == 32 @@ -126,7 +127,7 @@ static_assert(TEST_ALIGNOF(unordered_map_alloc<char, final_small_iter_allocator< struct TEST_ALIGNAS(32) AlignedHash {}; struct UnalignedEqualTo {}; -static_assert(sizeof(std::unordered_map<int, int, AlignedHash, UnalignedEqualTo>) == 96); +static_assert(sizeof(std::unordered_map<int, int, AlignedHash, UnalignedEqualTo>) == 64); static_assert(TEST_ALIGNOF(std::unordered_map<int, int, AlignedHash, UnalignedEqualTo>) == 32); #else diff --git a/libcxx/test/libcxx/containers/associative/unord.set/abi.compile.pass.cpp b/libcxx/test/libcxx/containers/associative/unord.set/abi.compile.pass.cpp index 359e248..dc6cc08 100644 --- a/libcxx/test/libcxx/containers/associative/unord.set/abi.compile.pass.cpp +++ b/libcxx/test/libcxx/containers/associative/unord.set/abi.compile.pass.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// -// UNSUPPORTED: libcpp-has-abi-fix-unordered-container-size-type +// UNSUPPORTED: libcpp-has-abi-fix-unordered-container-size-type, libcpp-abi-no-compressed-pair-padding #include <cstdint> #include <unordered_set> @@ -92,7 +92,8 @@ static_assert(TEST_ALIGNOF(unordered_set_alloc<char, final_small_iter_allocator< struct TEST_ALIGNAS(32) AlignedHash {}; struct UnalignedEqualTo {}; -static_assert(sizeof(std::unordered_set<int, AlignedHash, UnalignedEqualTo>) == 96, ""); +// This part of the ABI has been broken between LLVM 19 and LLVM 20. +static_assert(sizeof(std::unordered_set<int, AlignedHash, UnalignedEqualTo>) == 64, ""); static_assert(TEST_ALIGNOF(std::unordered_set<int, AlignedHash, UnalignedEqualTo>) == 32, ""); #elif __SIZE_WIDTH__ == 32 @@ -124,7 +125,7 @@ static_assert(TEST_ALIGNOF(unordered_set_alloc<char, final_small_iter_allocator< struct TEST_ALIGNAS(32) AlignedHash {}; struct UnalignedEqualTo {}; -static_assert(sizeof(std::unordered_set<int, AlignedHash, UnalignedEqualTo>) == 96); +static_assert(sizeof(std::unordered_set<int, AlignedHash, UnalignedEqualTo>) == 64); static_assert(TEST_ALIGNOF(std::unordered_set<int, AlignedHash, UnalignedEqualTo>) == 32); #else diff --git a/libcxx/test/libcxx/containers/sequences/deque/abi.compile.pass.cpp b/libcxx/test/libcxx/containers/sequences/deque/abi.compile.pass.cpp index 7d2dd21..30586d8 100644 --- a/libcxx/test/libcxx/containers/sequences/deque/abi.compile.pass.cpp +++ b/libcxx/test/libcxx/containers/sequences/deque/abi.compile.pass.cpp @@ -6,6 +6,8 @@ // //===----------------------------------------------------------------------===// +// UNSUPPORTED: libcpp-abi-no-compressed-pair-padding + #include <cstdint> #include <deque> diff --git a/libcxx/test/libcxx/containers/sequences/vector.bool/abi.compile.pass.cpp b/libcxx/test/libcxx/containers/sequences/vector.bool/abi.compile.pass.cpp index 48337a0c..6e6ea67 100644 --- a/libcxx/test/libcxx/containers/sequences/vector.bool/abi.compile.pass.cpp +++ b/libcxx/test/libcxx/containers/sequences/vector.bool/abi.compile.pass.cpp @@ -6,6 +6,8 @@ // //===----------------------------------------------------------------------===// +// UNSUPPORTED: libcpp-abi-no-compressed-pair-padding + #include <vector> #include "min_allocator.h" diff --git 
a/libcxx/test/libcxx/containers/unord/unord.set/missing_hash_specialization.verify.cpp b/libcxx/test/libcxx/containers/unord/unord.set/missing_hash_specialization.verify.cpp index f492b76..f6d93c7 100644 --- a/libcxx/test/libcxx/containers/unord/unord.set/missing_hash_specialization.verify.cpp +++ b/libcxx/test/libcxx/containers/unord/unord.set/missing_hash_specialization.verify.cpp @@ -48,11 +48,10 @@ int main(int, char**) { using Set = std::unordered_set<VT>; Set s; // expected-error@__hash_table:* {{the specified hash does not meet the Hash requirements}} - - // FIXME: It would be great to suppress the below diagnostic all together. - // but for now it's sufficient that it appears last. However there is - // currently no way to test the order diagnostics are issued. - // expected-error@*:* {{call to implicitly-deleted default constructor of 'std::}} + // FIXME: It would be great to suppress the below diagnostic all together. + // but for now it's sufficient that it appears last. However there is + // currently no way to test the order diagnostics are issued. + // expected-error@*:* {{call to implicitly-deleted default constructor}} } { using Set = std::unordered_set<int, BadHashNoCopy>; diff --git a/libcxx/test/libcxx/diagnostics/memory.nodiscard.verify.cpp b/libcxx/test/libcxx/diagnostics/memory.nodiscard.verify.cpp index 646569e..6410c84 100644 --- a/libcxx/test/libcxx/diagnostics/memory.nodiscard.verify.cpp +++ b/libcxx/test/libcxx/diagnostics/memory.nodiscard.verify.cpp @@ -10,6 +10,7 @@ // check that <memory> functions are marked [[nodiscard]] +// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_TEMPORARY_BUFFER // ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS // clang-format off diff --git a/libcxx/test/libcxx/memory/compressed_pair/compressed_pair.pass.cpp b/libcxx/test/libcxx/memory/compressed_pair/compressed_pair.pass.cpp deleted file mode 100644 index 4258089..0000000 --- a/libcxx/test/libcxx/memory/compressed_pair/compressed_pair.pass.cpp +++ /dev/null @@ -1,52 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include <__memory/compressed_pair.h> -#include <assert.h> -#include <new> - -#include "test_macros.h" - -typedef std::__compressed_pair<int, unsigned> IntPair; - -void test_constructor() { - IntPair value; - assert(value.first() == 0); - assert(value.second() == 0); - - value.first() = 1; - value.second() = 2; - new (&value) IntPair; - assert(value.first() == 0); - assert(value.second() == 0); -} - -void test_constructor_default_init() { - IntPair value; - value.first() = 1; - value.second() = 2; - - new (&value) IntPair(std::__default_init_tag(), 3); - assert(value.first() == 1); - assert(value.second() == 3); - - new (&value) IntPair(4, std::__default_init_tag()); - assert(value.first() == 4); - assert(value.second() == 3); - - new (&value) IntPair(std::__default_init_tag(), std::__default_init_tag()); - assert(value.first() == 4); - assert(value.second() == 3); -} - -int main(int, char**) -{ - test_constructor(); - test_constructor_default_init(); - return 0; -} diff --git a/libcxx/test/libcxx/type_traits/datasizeof.compile.pass.cpp b/libcxx/test/libcxx/type_traits/datasizeof.compile.pass.cpp index 90463b0..51e3a85 100644 --- a/libcxx/test/libcxx/type_traits/datasizeof.compile.pass.cpp +++ b/libcxx/test/libcxx/type_traits/datasizeof.compile.pass.cpp @@ -26,6 +26,10 @@ struct Empty {}; static_assert(std::__datasizeof_v<Empty> == 0, ""); +struct FinalEmpty final {}; + +static_assert(std::__datasizeof_v<FinalEmpty> == 0, ""); + struct OneBytePadding final { OneBytePadding() {} diff --git a/libcxx/test/libcxx/utilities/memory/util.smartptr/util.smartptr.shared/libcxx.control_block_layout.pass.cpp b/libcxx/test/libcxx/utilities/memory/util.smartptr/util.smartptr.shared/libcxx.control_block_layout.pass.cpp index 0af79ee..a298f55 100644 --- a/libcxx/test/libcxx/utilities/memory/util.smartptr/util.smartptr.shared/libcxx.control_block_layout.pass.cpp +++ b/libcxx/test/libcxx/utilities/memory/util.smartptr/util.smartptr.shared/libcxx.control_block_layout.pass.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// // UNSUPPORTED: c++03 +// UNSUPPORTED: libcpp-abi-no-compressed-pair-padding // This test makes sure that the control block implementation used for non-array // types in std::make_shared and std::allocate_shared is ABI compatible with the @@ -18,6 +19,7 @@ #include <cassert> #include <cstddef> #include <memory> +#include <tuple> #include <type_traits> #include <utility> @@ -26,6 +28,44 @@ #include "test_macros.h" +struct value_init_tag {}; + +template <class T, int _Idx, bool CanBeEmptyBase = std::is_empty<T>::value && !std::__libcpp_is_final<T>::value> +struct compressed_pair_elem { + explicit compressed_pair_elem(value_init_tag) : value_() {} + + template <class U> + explicit compressed_pair_elem(U&& u) : value_(std::forward<U>(u)) {} + + T& get() { return value_; } + +private: + T value_; +}; + +template <class T, int _Idx> +struct compressed_pair_elem<T, _Idx, true> : private T { + explicit compressed_pair_elem(value_init_tag) : T() {} + + template <class U> + explicit compressed_pair_elem(U&& u) : T(std::forward<U>(u)) {} + + T& get() { return *this; } +}; + +template <class T1, class T2> +class compressed_pair : private compressed_pair_elem<T1, 0>, private compressed_pair_elem<T2, 1> { +public: + using Base1 = compressed_pair_elem<T1, 0>; + using Base2 = compressed_pair_elem<T2, 1>; + + 
template <class U1, class U2> + explicit compressed_pair(U1&& t1, U2&& t2) : Base1(std::forward<U1>(t1)), Base2(std::forward<U2>(t2)) {} + + T1& first() { return static_cast<Base1&>(*this).get(); } + T2& second() { return static_cast<Base2&>(*this).get(); } +}; + // This is the pre-C++20 implementation of the control block used by non-array // std::allocate_shared and std::make_shared. We keep it here so that we can // make sure our implementation is backwards compatible with it forever. @@ -33,10 +73,8 @@ // Of course, the class and its methods were renamed, but the size and layout // of the class should remain the same as the original implementation. template <class T, class Alloc> -struct OldEmplaceControlBlock - : std::__shared_weak_count -{ - explicit OldEmplaceControlBlock(Alloc a) : data_(std::move(a), std::__value_init_tag()) { } +struct OldEmplaceControlBlock : std::__shared_weak_count { + explicit OldEmplaceControlBlock(Alloc a) : data_(std::move(a), value_init_tag()) {} T* get_elem() noexcept { return std::addressof(data_.second()); } Alloc* get_alloc() noexcept { return std::addressof(data_.first()); } @@ -49,7 +87,7 @@ private: // Not implemented } - std::__compressed_pair<Alloc, T> data_; + compressed_pair<Alloc, T> data_; }; template <class T, template <class> class Alloc> @@ -67,8 +105,8 @@ void test() { // 1. Check the stored object { - char const* old_elem = reinterpret_cast<char const*>(old.get_elem()); - char const* new_elem = reinterpret_cast<char const*>(new_.__get_elem()); + char const* old_elem = reinterpret_cast<char const*>(old.get_elem()); + char const* new_elem = reinterpret_cast<char const*>(new_.__get_elem()); std::ptrdiff_t old_offset = old_elem - reinterpret_cast<char const*>(&old); std::ptrdiff_t new_offset = new_elem - reinterpret_cast<char const*>(&new_); assert(new_offset == old_offset && "offset of stored element changed"); @@ -76,8 +114,8 @@ void test() { // 2. 
Check the allocator { - char const* old_alloc = reinterpret_cast<char const*>(old.get_alloc()); - char const* new_alloc = reinterpret_cast<char const*>(new_.__get_alloc()); + char const* old_alloc = reinterpret_cast<char const*>(old.get_alloc()); + char const* new_alloc = reinterpret_cast<char const*>(new_.__get_alloc()); std::ptrdiff_t old_offset = old_alloc - reinterpret_cast<char const*>(&old); std::ptrdiff_t new_offset = new_alloc - reinterpret_cast<char const*>(&new_); assert(new_offset == old_offset && "offset of allocator changed"); @@ -89,48 +127,66 @@ void test() { } // Object types to store in the control block -struct TrivialEmptyType { }; -struct TrivialNonEmptyType { char c[11]; }; -struct FinalEmptyType final { }; +struct TrivialEmptyType {}; + +struct alignas(32) OveralignedEmptyType {}; + +struct TrivialNonEmptyType { + char c[11]; +}; + +struct FinalEmptyType final {}; + struct NonTrivialType { char c[22]; - NonTrivialType() : c{'x'} { } + NonTrivialType() : c{'x'} {} +}; + +struct VirtualFunctionType { + virtual ~VirtualFunctionType() {} }; // Allocator types template <class T> struct TrivialEmptyAlloc { - using value_type = T; + using value_type = T; TrivialEmptyAlloc() = default; - template <class U> TrivialEmptyAlloc(TrivialEmptyAlloc<U>) { } + template <class U> + TrivialEmptyAlloc(TrivialEmptyAlloc<U>) {} T* allocate(std::size_t) { return nullptr; } - void deallocate(T*, std::size_t) { } + void deallocate(T*, std::size_t) {} }; + template <class T> struct TrivialNonEmptyAlloc { char storage[77]; - using value_type = T; + using value_type = T; TrivialNonEmptyAlloc() = default; - template <class U> TrivialNonEmptyAlloc(TrivialNonEmptyAlloc<U>) { } + template <class U> + TrivialNonEmptyAlloc(TrivialNonEmptyAlloc<U>) {} T* allocate(std::size_t) { return nullptr; } - void deallocate(T*, std::size_t) { } + void deallocate(T*, std::size_t) {} }; + template <class T> struct FinalEmptyAlloc final { - using value_type = T; + using value_type = T; FinalEmptyAlloc() = default; - template <class U> FinalEmptyAlloc(FinalEmptyAlloc<U>) { } + template <class U> + FinalEmptyAlloc(FinalEmptyAlloc<U>) {} T* allocate(std::size_t) { return nullptr; } - void deallocate(T*, std::size_t) { } + void deallocate(T*, std::size_t) {} }; + template <class T> struct NonTrivialAlloc { char storage[88]; using value_type = T; - NonTrivialAlloc() { } - template <class U> NonTrivialAlloc(NonTrivialAlloc<U>) { } + NonTrivialAlloc() {} + template <class U> + NonTrivialAlloc(NonTrivialAlloc<U>) {} T* allocate(std::size_t) { return nullptr; } - void deallocate(T*, std::size_t) { } + void deallocate(T*, std::size_t) {} }; int main(int, char**) { @@ -139,21 +195,30 @@ int main(int, char**) { test<TrivialEmptyType, FinalEmptyAlloc>(); test<TrivialEmptyType, NonTrivialAlloc>(); + test<OveralignedEmptyType, TrivialEmptyAlloc>(); + test<OveralignedEmptyType, TrivialNonEmptyAlloc>(); + test<OveralignedEmptyType, FinalEmptyAlloc>(); + test<OveralignedEmptyType, NonTrivialAlloc>(); + test<TrivialNonEmptyType, TrivialEmptyAlloc>(); test<TrivialNonEmptyType, TrivialNonEmptyAlloc>(); test<TrivialNonEmptyType, FinalEmptyAlloc>(); test<TrivialNonEmptyType, NonTrivialAlloc>(); test<FinalEmptyType, TrivialEmptyAlloc>(); - test<FinalEmptyType, TrivialNonEmptyAlloc>(); - test<FinalEmptyType, FinalEmptyAlloc>(); - test<FinalEmptyType, NonTrivialAlloc>(); + // FinalEmptyType combined with TrivialNonEmptyAlloc, FinalEmptyAlloc or NonTrivialAlloc is known to have an ABI break + // between LLVM 19 and LLVM 20. 
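That known break, and the earlier unordered_map/unordered_set static_asserts dropping from 96 to 64 bytes, come from the same mechanism: an over-aligned empty hasher stored through the old base-class compressed_pair forced each pair subobject up to a full 32-byte slot, whereas a [[no_unique_address]] member may overlap the object's existing padding. A self-contained C++20 sketch of the effect, with invented member names and the usual Itanium-ABI caveat (MSVC ignores plain [[no_unique_address]]):

#include <cstdio>

struct alignas(32) AlignedHash {};
struct UnalignedEqualTo {};

// Old shape: the size/hasher compressed_pair derives from the hasher, so the
// pair subobject is itself 32-byte aligned and 32 bytes large.
struct OldSizeHashPair : AlignedHash {
  unsigned long size_ = 0;
};
struct OldMap {
  void* bucket_list_[2];
  void* first_node_;
  OldSizeHashPair p2_; // pushed to the next 32-byte boundary
  float max_load_factor_;
};

// New shape: the hasher is a [[no_unique_address]] member and can share
// storage with the padding that is already there.
struct NewMap {
  void* bucket_list_[2];
  void* first_node_;
  unsigned long size_;
  [[no_unique_address]] AlignedHash hash_;
  float max_load_factor_;
  [[no_unique_address]] UnalignedEqualTo eq_;
};

int main() {
  // Typically prints old=96 new=64 with GCC/Clang on LP64 targets.
  std::printf("old=%zu new=%zu\n", sizeof(OldMap), sizeof(NewMap));
  return 0;
}

The _LIBCPP_ABI_NO_COMPRESSED_PAIR_PADDING macro and its libcpp-abi-no-compressed-pair-padding lit feature, registered later in this patch, let the affected ABI checks opt out, which is what the added UNSUPPORTED lines do.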
It's been deemed not severe enough to cause actual breakage. test<NonTrivialType, TrivialEmptyAlloc>(); test<NonTrivialType, TrivialNonEmptyAlloc>(); test<NonTrivialType, FinalEmptyAlloc>(); test<NonTrivialType, NonTrivialAlloc>(); + test<VirtualFunctionType, TrivialEmptyAlloc>(); + test<VirtualFunctionType, TrivialNonEmptyAlloc>(); + test<VirtualFunctionType, FinalEmptyAlloc>(); + test<VirtualFunctionType, NonTrivialAlloc>(); + // Test a few real world types just to make sure we didn't mess up badly somehow test<std::string, std::allocator>(); test<int, std::allocator>(); diff --git a/libcxx/test/std/algorithms/alg.modifying.operations/alg.partitions/stable_partition.pass.cpp b/libcxx/test/std/algorithms/alg.modifying.operations/alg.partitions/stable_partition.pass.cpp index 85d12d0..4402754 100644 --- a/libcxx/test/std/algorithms/alg.modifying.operations/alg.partitions/stable_partition.pass.cpp +++ b/libcxx/test/std/algorithms/alg.modifying.operations/alg.partitions/stable_partition.pass.cpp @@ -282,9 +282,9 @@ test() assert(array[9] == P(0, 2)); } #if TEST_STD_VER >= 11 && !defined(TEST_HAS_NO_EXCEPTIONS) - // TODO: Re-enable this test once we're no longer using get_temporary_buffer(). + // TODO: Re-enable this test once we get recursive inlining fixed. // For now it trips up GCC due to the use of always_inline. -#if 0 +# if 0 { // check that the algorithm still works when no memory is available std::vector<int> vec(150, 3); vec[5] = 6; @@ -300,7 +300,7 @@ test() assert(std::is_partitioned(vec.begin(), vec.end(), [](int i) { return i < 5; })); getGlobalMemCounter()->reset(); } -#endif +# endif #endif // TEST_STD_VER >= 11 && !defined(TEST_HAS_NO_EXCEPTIONS) } diff --git a/libcxx/test/std/depr/depr.c.headers/wchar_h.compile.pass.cpp b/libcxx/test/std/depr/depr.c.headers/wchar_h.compile.pass.cpp index ae08ebd..98d028c 100644 --- a/libcxx/test/std/depr/depr.c.headers/wchar_h.compile.pass.cpp +++ b/libcxx/test/std/depr/depr.c.headers/wchar_h.compile.pass.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// -// XFAIL: no-wide-characters +// UNSUPPORTED: no-wide-characters // <wchar.h> diff --git a/libcxx/test/std/depr/depr.c.headers/wctype_h.compile.pass.cpp b/libcxx/test/std/depr/depr.c.headers/wctype_h.compile.pass.cpp index 094f7713..35b2945 100644 --- a/libcxx/test/std/depr/depr.c.headers/wctype_h.compile.pass.cpp +++ b/libcxx/test/std/depr/depr.c.headers/wctype_h.compile.pass.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// -// XFAIL: no-wide-characters +// UNSUPPORTED: no-wide-characters // <wctype.h> diff --git a/libcxx/test/std/strings/c.strings/cwchar.pass.cpp b/libcxx/test/std/strings/c.strings/cwchar.pass.cpp index 3aa660f..0caf4b9 100644 --- a/libcxx/test/std/strings/c.strings/cwchar.pass.cpp +++ b/libcxx/test/std/strings/c.strings/cwchar.pass.cpp @@ -8,7 +8,7 @@ // <cwchar> -// XFAIL: no-wide-characters +// UNSUPPORTED: no-wide-characters #include <cwchar> #include <ctime> diff --git a/libcxx/test/std/strings/c.strings/cwchar_include_order1.compile.verify.cpp b/libcxx/test/std/strings/c.strings/cwchar_include_order1.compile.verify.cpp index 4b5460a..10560e5 100644 --- a/libcxx/test/std/strings/c.strings/cwchar_include_order1.compile.verify.cpp +++ b/libcxx/test/std/strings/c.strings/cwchar_include_order1.compile.verify.cpp @@ -8,7 +8,7 @@ // <cwchar> -// XFAIL: no-wide-characters +// UNSUPPORTED: no-wide-characters // Tests that include ordering does not affect the 
definition of wcsstr. // See: https://llvm.org/PR62638 diff --git a/libcxx/test/std/strings/c.strings/cwchar_include_order2.compile.verify.cpp b/libcxx/test/std/strings/c.strings/cwchar_include_order2.compile.verify.cpp index 0222ac0..4be7999 100644 --- a/libcxx/test/std/strings/c.strings/cwchar_include_order2.compile.verify.cpp +++ b/libcxx/test/std/strings/c.strings/cwchar_include_order2.compile.verify.cpp @@ -8,7 +8,7 @@ // <cwchar> -// XFAIL: no-wide-characters +// UNSUPPORTED: no-wide-characters // Tests that include ordering does not affect the definition of wcsstr. // See: https://llvm.org/PR62638 diff --git a/libcxx/test/std/strings/c.strings/cwctype.pass.cpp b/libcxx/test/std/strings/c.strings/cwctype.pass.cpp index d421bea..5bc2531 100644 --- a/libcxx/test/std/strings/c.strings/cwctype.pass.cpp +++ b/libcxx/test/std/strings/c.strings/cwctype.pass.cpp @@ -8,7 +8,7 @@ // <cwctype> -// XFAIL: no-wide-characters +// UNSUPPORTED: no-wide-characters #include <cwctype> #include <type_traits> diff --git a/libcxx/test/std/utilities/function.objects/func.wrap/func.wrap.func/func.wrap.func.con/copy_move.pass.cpp b/libcxx/test/std/utilities/function.objects/func.wrap/func.wrap.func/func.wrap.func.con/copy_move.pass.cpp index 5b3f4f1..5afc3ad 100644 --- a/libcxx/test/std/utilities/function.objects/func.wrap/func.wrap.func/func.wrap.func.con/copy_move.pass.cpp +++ b/libcxx/test/std/utilities/function.objects/func.wrap/func.wrap.func/func.wrap.func.con/copy_move.pass.cpp @@ -6,10 +6,6 @@ // //===----------------------------------------------------------------------===// -// FIXME: In MSVC mode, even "std::function<int(int)> f(aref);" causes -// allocations. -// XFAIL: target=x86_64-pc-windows-msvc && stdlib=libc++ && libcpp-abi-version=1 - // UNSUPPORTED: c++03 // <functional> diff --git a/libcxx/test/std/utilities/memory/temporary.buffer/overaligned.pass.cpp b/libcxx/test/std/utilities/memory/temporary.buffer/overaligned.pass.cpp index 8499478..4c66370 100644 --- a/libcxx/test/std/utilities/memory/temporary.buffer/overaligned.pass.cpp +++ b/libcxx/test/std/utilities/memory/temporary.buffer/overaligned.pass.cpp @@ -8,6 +8,7 @@ // UNSUPPORTED: c++03 +// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_TEMPORARY_BUFFER // ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS // <memory> diff --git a/libcxx/test/std/utilities/memory/temporary.buffer/temporary_buffer.pass.cpp b/libcxx/test/std/utilities/memory/temporary.buffer/temporary_buffer.pass.cpp index 6c0fbf2..5f7fc45 100644 --- a/libcxx/test/std/utilities/memory/temporary.buffer/temporary_buffer.pass.cpp +++ b/libcxx/test/std/utilities/memory/temporary.buffer/temporary_buffer.pass.cpp @@ -8,6 +8,7 @@ // <memory> +// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_TEMPORARY_BUFFER // ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS // template <class T> diff --git a/libcxx/utils/ci/docker-compose.yml b/libcxx/utils/ci/docker-compose.yml index df5df8c..795e0dc 100644 --- a/libcxx/utils/ci/docker-compose.yml +++ b/libcxx/utils/ci/docker-compose.yml @@ -2,7 +2,7 @@ version: '3.7' x-versions: &compiler_versions GCC_LATEST_VERSION: 14 - LLVM_HEAD_VERSION: 19 + LLVM_HEAD_VERSION: 20 services: buildkite-builder: diff --git a/libcxx/utils/gdb/libcxx/printers.py b/libcxx/utils/gdb/libcxx/printers.py index 3f39232..49087f9 100644 --- a/libcxx/utils/gdb/libcxx/printers.py +++ b/libcxx/utils/gdb/libcxx/printers.py @@ -154,11 +154,6 @@ def _typename_with_n_generic_arguments(gdb_type, n): result = (template[:-2] 
+ ">") % tuple(arg_list) return result - -def _typename_with_first_generic_argument(gdb_type): - return _typename_with_n_generic_arguments(gdb_type, 1) - - class StdTuplePrinter(object): """Print a std::tuple.""" @@ -196,33 +191,6 @@ class StdTuplePrinter(object): return iter(()) return self._Children(self.val) - -def _get_base_subobject(child_class_value, index=0): - """Returns the object's value in the form of the parent class at index. - - This function effectively casts the child_class_value to the base_class's - type, but the type-to-cast to is stored in the field at index, and once - we know the field, we can just return the data. - - Args: - child_class_value: the value to cast - index: the parent class index - - Raises: - Exception: field at index was not a base-class field. - """ - - field = child_class_value.type.fields()[index] - if not field.is_base_class: - raise Exception("Not a base-class field.") - return child_class_value[field] - - -def _value_of_pair_first(value): - """Convenience for _get_base_subobject, for the common case.""" - return _get_base_subobject(value, 0)["__value_"] - - class StdStringPrinter(object): """Print a std::string.""" @@ -231,7 +199,7 @@ class StdStringPrinter(object): def to_string(self): """Build a python string from the data whether stored inline or separately.""" - value_field = _value_of_pair_first(self.val["__r_"]) + value_field = self.val["__rep_"] short_field = value_field["__s"] short_size = short_field["__size_"] if short_field["__is_long_"]: @@ -270,7 +238,7 @@ class StdUniquePtrPrinter(object): def __init__(self, val): self.val = val - self.addr = _value_of_pair_first(self.val["__ptr_"]) + self.addr = self.val["__ptr_"] self.pointee_type = self.val.type.template_argument(0) def to_string(self): @@ -397,16 +365,12 @@ class StdVectorPrinter(object): self.typename += "<bool>" self.length = self.val["__size_"] bits_per_word = self.val["__bits_per_word"] - self.capacity = ( - _value_of_pair_first(self.val["__cap_alloc_"]) * bits_per_word - ) + self.capacity = self.val["__cap_"] * bits_per_word self.iterator = self._VectorBoolIterator(begin, self.length, bits_per_word) else: end = self.val["__end_"] self.length = end - begin - self.capacity = ( - _get_base_subobject(self.val["__end_cap_"])["__value_"] - begin - ) + self.capacity = self.val["__cap_"] - begin self.iterator = self._VectorIterator(begin, end) def to_string(self): @@ -461,7 +425,7 @@ class StdDequePrinter(object): def __init__(self, val): self.val = val - self.size = int(_value_of_pair_first(val["__size_"])) + self.size = int(val["__size_"]) self.start_ptr = self.val["__map_"]["__begin_"] self.first_block_start_index = int(self.val["__start_"]) self.node_type = self.start_ptr.type @@ -513,8 +477,7 @@ class StdListPrinter(object): def __init__(self, val): self.val = val - size_alloc_field = self.val["__size_alloc_"] - self.size = int(_value_of_pair_first(size_alloc_field)) + self.size = int(self.val["__size_"]) dummy_node = self.val["__end_"] self.nodetype = gdb.lookup_type( re.sub( @@ -646,9 +609,8 @@ class AbstractRBTreePrinter(object): def __init__(self, val): self.val = val tree = self.val["__tree_"] - self.size = int(_value_of_pair_first(tree["__pair3_"])) - dummy_root = tree["__pair1_"] - root = _value_of_pair_first(dummy_root)["__left_"] + self.size = int(tree["__size_"]) + root = tree["__end_node_"]["__left_"] cast_type = self._init_cast_type(val.type) self.util = RBTreeUtils(cast_type, root) @@ -815,13 +777,13 @@ class AbstractUnorderedCollectionPrinter(object): def 
__init__(self, val): self.val = val self.table = val["__table_"] - self.sentinel = self.table["__p1_"] - self.size = int(_value_of_pair_first(self.table["__p2_"])) - node_base_type = self.sentinel.type.template_argument(0) + self.sentinel = self.table["__first_node_"] + self.size = int(self.table["__size_"]) + node_base_type = self.sentinel.type self.cast_type = node_base_type.template_argument(0) def _list_it(self, sentinel_ptr): - next_ptr = _value_of_pair_first(sentinel_ptr)["__next_"] + next_ptr = sentinel_ptr["__next_"] while str(next_ptr.cast(_void_pointer_type)) != "0x0": next_val = next_ptr.cast(self.cast_type).dereference() for key_value in self._get_key_value(next_val): diff --git a/libcxx/utils/libcxx/header_information.py b/libcxx/utils/libcxx/header_information.py index 6944021..fd48b35 100644 --- a/libcxx/utils/libcxx/header_information.py +++ b/libcxx/utils/libcxx/header_information.py @@ -30,13 +30,6 @@ header_restrictions = { "streambuf": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)", "strstream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)", "syncstream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)", - - # headers with #error directives - "wchar.h": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)", - "wctype.h": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)", - # transitive includers of the above headers - "cwchar": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)", - "cwctype": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)", } lit_header_restrictions = { diff --git a/libcxx/utils/libcxx/test/features.py b/libcxx/utils/libcxx/test/features.py index 2cd0412..1545617 100644 --- a/libcxx/utils/libcxx/test/features.py +++ b/libcxx/utils/libcxx/test/features.py @@ -376,6 +376,7 @@ macros = { "_LIBCPP_ABI_BOUNDED_ITERATORS_IN_VECTOR": "libcpp-has-abi-bounded-iterators-in-vector", "_LIBCPP_ABI_FIX_UNORDERED_CONTAINER_SIZE_TYPE": "libcpp-has-abi-fix-unordered-container-size-type", "_LIBCPP_DEPRECATED_ABI_DISABLE_PAIR_TRIVIAL_COPY_CTOR": "libcpp-deprecated-abi-disable-pair-trivial-copy-ctor", + "_LIBCPP_ABI_NO_COMPRESSED_PAIR_PADDING": "libcpp-abi-no-compressed-pair-padding", "_LIBCPP_HAS_NO_FILESYSTEM": "no-filesystem", "_LIBCPP_HAS_NO_RANDOM_DEVICE": "no-random-device", "_LIBCPP_HAS_NO_LOCALIZATION": "no-localization", diff --git a/libcxx/utils/libcxx/test/modules.py b/libcxx/utils/libcxx/test/modules.py index 91933d4..4824ca3 100644 --- a/libcxx/utils/libcxx/test/modules.py +++ b/libcxx/utils/libcxx/test/modules.py @@ -52,13 +52,6 @@ SkipDeclarations["random"] = [ "std::operator==", ] -# TODO MODULES remove zombie names -# https://libcxx.llvm.org/Status/Cxx20.html#note-p0619 -SkipDeclarations["memory"] = [ - "std::return_temporary_buffer", - "std::get_temporary_buffer", -] - # include/__type_traits/is_swappable.h SkipDeclarations["type_traits"] = [ "std::swap", diff --git a/lld/COFF/Chunks.cpp b/lld/COFF/Chunks.cpp index ee54fa3..6510c63 100644 --- a/lld/COFF/Chunks.cpp +++ b/lld/COFF/Chunks.cpp @@ -385,7 +385,7 @@ static void maybeReportRelocationToDiscarded(const SectionChunk *fromChunk, os << "relocation against symbol in discarded section: " + name; for (const std::string &s : symbolLocations) os << s; - error(os.str()); + error(out); } void SectionChunk::writeTo(uint8_t *buf) const { diff --git a/lld/COFF/DriverUtils.cpp b/lld/COFF/DriverUtils.cpp index 6e8f74c..39af621 100644 --- a/lld/COFF/DriverUtils.cpp +++ b/lld/COFF/DriverUtils.cpp @@ -415,7 +415,7 @@ std::string LinkerDriver::createDefaultXml() { << " </dependency>\n"; } os << "</assembly>\n"; - return os.str(); + return ret; } std::string diff --git 
a/lld/COFF/SymbolTable.cpp b/lld/COFF/SymbolTable.cpp index 582a8562..fa40335 100644 --- a/lld/COFF/SymbolTable.cpp +++ b/lld/COFF/SymbolTable.cpp @@ -275,7 +275,7 @@ static void reportUndefinedSymbol(const COFFLinkerContext &ctx, } if (numDisplayedRefs < numRefs) os << "\n>>> referenced " << numRefs - numDisplayedRefs << " more times"; - errorOrWarn(os.str(), ctx.config.forceUnresolved); + errorOrWarn(out, ctx.config.forceUnresolved); } void SymbolTable::loadMinGWSymbols() { @@ -667,7 +667,7 @@ static std::string getSourceLocationObj(ObjFile *file, SectionChunk *sc, if (fileLine) os << fileLine->first << ":" << fileLine->second << "\n>>> "; os << toString(file); - return os.str(); + return res; } static std::string getSourceLocation(InputFile *file, SectionChunk *sc, @@ -706,9 +706,9 @@ void SymbolTable::reportDuplicate(Symbol *existing, InputFile *newFile, existing->getName()); if (ctx.config.forceMultiple) - warn(os.str()); + warn(msg); else - error(os.str()); + error(msg); } Symbol *SymbolTable::addAbsolute(StringRef n, COFFSymbolRef sym) { diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp index 7ed4bd2..36880bf 100644 --- a/lld/ELF/Arch/AArch64.cpp +++ b/lld/ELF/Arch/AArch64.cpp @@ -322,7 +322,7 @@ int64_t AArch64::getImplicitAddend(const uint8_t *buf, RelType type) const { } void AArch64::writeGotPlt(uint8_t *buf, const Symbol &) const { - write64(buf, in.plt->getVA()); + write64(buf, ctx.in.plt->getVA()); } void AArch64::writeIgotPlt(uint8_t *buf, const Symbol &s) const { @@ -343,8 +343,8 @@ void AArch64::writePltHeader(uint8_t *buf) const { }; memcpy(buf, pltData, sizeof(pltData)); - uint64_t got = in.gotPlt->getVA(); - uint64_t plt = in.plt->getVA(); + uint64_t got = ctx.in.gotPlt->getVA(); + uint64_t plt = ctx.in.plt->getVA(); relocateNoSym(buf + 4, R_AARCH64_ADR_PREL_PG_HI21, getAArch64Page(got + 16) - getAArch64Page(plt + 4)); relocateNoSym(buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, got + 16); @@ -1003,8 +1003,8 @@ void AArch64BtiPac::writePltHeader(uint8_t *buf) const { }; const uint8_t nopData[] = { 0x1f, 0x20, 0x03, 0xd5 }; // nop - uint64_t got = in.gotPlt->getVA(); - uint64_t plt = in.plt->getVA(); + uint64_t got = ctx.in.gotPlt->getVA(); + uint64_t plt = ctx.in.plt->getVA(); if (btiHeader) { // PltHeader is called indirectly by plt[N]. Prefix pltData with a BTI C diff --git a/lld/ELF/Arch/ARM.cpp b/lld/ELF/Arch/ARM.cpp index 827ba3a..12b5769 100644 --- a/lld/ELF/Arch/ARM.cpp +++ b/lld/ELF/Arch/ARM.cpp @@ -204,7 +204,7 @@ RelType ARM::getDynRel(RelType type) const { } void ARM::writeGotPlt(uint8_t *buf, const Symbol &) const { - write32(buf, in.plt->getVA()); + write32(buf, ctx.in.plt->getVA()); } void ARM::writeIgotPlt(uint8_t *buf, const Symbol &s) const { @@ -223,8 +223,8 @@ static void writePltHeaderLong(uint8_t *buf) { write32(buf + 20, 0xd4d4d4d4); // Pad to 32-byte boundary write32(buf + 24, 0xd4d4d4d4); // Pad to 32-byte boundary write32(buf + 28, 0xd4d4d4d4); - uint64_t gotPlt = in.gotPlt->getVA(); - uint64_t l1 = in.plt->getVA() + 8; + uint64_t gotPlt = ctx.in.gotPlt->getVA(); + uint64_t l1 = ctx.in.plt->getVA() + 8; write32(buf + 16, gotPlt - l1 - 8); } @@ -249,7 +249,7 @@ void ARM::writePltHeader(uint8_t *buf) const { // At 0x8, we want to jump to .got.plt, the -16 accounts for 8 bytes from // `pc` in the add instruction and 8 bytes for the `lr` adjustment. 
// - uint64_t offset = in.gotPlt->getVA() - in.plt->getVA() - 16; + uint64_t offset = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA() - 16; assert(llvm::isUInt<32>(offset) && "This should always fit into a 32-bit offset"); write16(buf + 0, 0xb500); // Split into two halves to support endianness correctly. @@ -277,7 +277,7 @@ void ARM::writePltHeader(uint8_t *buf) const { 0xe5bef000, // ldr pc, [lr, #0x00000NNN] &(.got.plt -L1 - 4) }; - uint64_t offset = in.gotPlt->getVA() - in.plt->getVA() - 4; + uint64_t offset = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA() - 4; if (!llvm::isUInt<27>(offset)) { // We cannot encode the Offset, use the long form. writePltHeaderLong(buf); diff --git a/lld/ELF/Arch/Hexagon.cpp b/lld/ELF/Arch/Hexagon.cpp index abde3cd..a492d0a 100644 --- a/lld/ELF/Arch/Hexagon.cpp +++ b/lld/ELF/Arch/Hexagon.cpp @@ -359,7 +359,7 @@ void Hexagon::writePltHeader(uint8_t *buf) const { memcpy(buf, pltData, sizeof(pltData)); // Offset from PLT0 to the GOT. - uint64_t off = in.gotPlt->getVA() - in.plt->getVA(); + uint64_t off = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA(); relocateNoSym(buf, R_HEX_B32_PCREL_X, off); relocateNoSym(buf + 4, R_HEX_6_PCREL_X, off); } diff --git a/lld/ELF/Arch/LoongArch.cpp b/lld/ELF/Arch/LoongArch.cpp index 01e42a5..0044afb 100644 --- a/lld/ELF/Arch/LoongArch.cpp +++ b/lld/ELF/Arch/LoongArch.cpp @@ -308,9 +308,9 @@ int64_t LoongArch::getImplicitAddend(const uint8_t *buf, RelType type) const { void LoongArch::writeGotPlt(uint8_t *buf, const Symbol &s) const { if (config->is64) - write64le(buf, in.plt->getVA()); + write64le(buf, ctx.in.plt->getVA()); else - write32le(buf, in.plt->getVA()); + write32le(buf, ctx.in.plt->getVA()); } void LoongArch::writeIgotPlt(uint8_t *buf, const Symbol &s) const { @@ -341,7 +341,7 @@ void LoongArch::writePltHeader(uint8_t *buf) const { // srli.[wd] $t1, $t1, (is64?1:2) ; t1 = &.got.plt[i] - &.got.plt[0] // ld.[wd] $t0, $t0, Wordsize ; t0 = link_map // jr $t3 - uint32_t offset = in.gotPlt->getVA() - in.plt->getVA(); + uint32_t offset = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA(); uint32_t sub = config->is64 ? SUB_D : SUB_W; uint32_t ld = config->is64 ? LD_D : LD_W; uint32_t addi = config->is64 ? ADDI_D : ADDI_W; diff --git a/lld/ELF/Arch/Mips.cpp b/lld/ELF/Arch/Mips.cpp index 7c71371..f5920c7 100644 --- a/lld/ELF/Arch/Mips.cpp +++ b/lld/ELF/Arch/Mips.cpp @@ -205,7 +205,7 @@ template <class ELFT> RelType MIPS<ELFT>::getDynRel(RelType type) const { template <class ELFT> void MIPS<ELFT>::writeGotPlt(uint8_t *buf, const Symbol &) const { - uint64_t va = in.plt->getVA(); + uint64_t va = ctx.in.plt->getVA(); if (isMicroMips()) va |= 1; write32(buf, va); @@ -257,8 +257,8 @@ static void writeMicroRelocation16(uint8_t *loc, uint64_t v, uint8_t bitsSize, template <class ELFT> void MIPS<ELFT>::writePltHeader(uint8_t *buf) const { if (isMicroMips()) { - uint64_t gotPlt = in.gotPlt->getVA(); - uint64_t plt = in.plt->getVA(); + uint64_t gotPlt = ctx.in.gotPlt->getVA(); + uint64_t plt = ctx.in.plt->getVA(); // Overwrite trap instructions written by Writer::writeTrapInstr. 
memset(buf, 0, pltHeaderSize); @@ -310,7 +310,7 @@ template <class ELFT> void MIPS<ELFT>::writePltHeader(uint8_t *buf) const { write32(buf + 24, jalrInst); // jalr.hb $25 or jalr $25 write32(buf + 28, 0x2718fffe); // subu $24, $24, 2 - uint64_t gotPlt = in.gotPlt->getVA(); + uint64_t gotPlt = ctx.in.gotPlt->getVA(); writeValue(buf, gotPlt + 0x8000, 16, 16); writeValue(buf + 4, gotPlt, 16, 0); writeValue(buf + 8, gotPlt, 16, 0); diff --git a/lld/ELF/Arch/PPC.cpp b/lld/ELF/Arch/PPC.cpp index 186dcf2..53f760e 100644 --- a/lld/ELF/Arch/PPC.cpp +++ b/lld/ELF/Arch/PPC.cpp @@ -75,9 +75,10 @@ static void writeFromHalf16(uint8_t *loc, uint32_t insn) { void elf::writePPC32GlinkSection(uint8_t *buf, size_t numEntries) { // Create canonical PLT entries for non-PIE code. Compilers don't generate // non-GOT-non-PLT relocations referencing external functions for -fpie/-fPIE. - uint32_t glink = in.plt->getVA(); // VA of .glink + uint32_t glink = ctx.in.plt->getVA(); // VA of .glink if (!config->isPic) { - for (const Symbol *sym : cast<PPC32GlinkSection>(*in.plt).canonical_plts) { + for (const Symbol *sym : + cast<PPC32GlinkSection>(*ctx.in.plt).canonical_plts) { writePPC32PltCallStub(buf, sym->getGotPltVA(), nullptr, 0); buf += 16; glink += 16; @@ -101,10 +102,10 @@ void elf::writePPC32GlinkSection(uint8_t *buf, size_t numEntries) { // Then write PLTresolve(), which has two forms: PIC and non-PIC. PLTresolve() // computes the PLT index (by computing the distance from the landing b to // itself) and calls _dl_runtime_resolve() (in glibc). - uint32_t got = in.got->getVA(); + uint32_t got = ctx.in.got->getVA(); const uint8_t *end = buf + 64; if (config->isPic) { - uint32_t afterBcl = 4 * in.plt->getNumEntries() + 12; + uint32_t afterBcl = 4 * ctx.in.plt->getNumEntries() + 12; uint32_t gotBcl = got + 4 - (glink + afterBcl); write32(buf + 0, 0x3d6b0000 | ha(afterBcl)); // addis r11,r11,1f-glink@ha write32(buf + 4, 0x7c0802a6); // mflr r0 @@ -192,7 +193,8 @@ void PPC::writeGotHeader(uint8_t *buf) const { void PPC::writeGotPlt(uint8_t *buf, const Symbol &s) const { // Address of the symbol resolver stub in .glink . - write32(buf, in.plt->getVA() + in.plt->headerSize + 4 * s.getPltIdx()); + write32(buf, + ctx.in.plt->getVA() + ctx.in.plt->headerSize + 4 * s.getPltIdx()); } bool PPC::needsThunk(RelExpr expr, RelType type, const InputFile *file, diff --git a/lld/ELF/Arch/PPC64.cpp b/lld/ELF/Arch/PPC64.cpp index 15abbfd..e7617ca 100644 --- a/lld/ELF/Arch/PPC64.cpp +++ b/lld/ELF/Arch/PPC64.cpp @@ -210,7 +210,7 @@ uint64_t elf::getPPC64TocBase() { // TOC starts where the first of these sections starts. We always create a // .got when we see a relocation that uses it, so for us the start is always // the .got. - uint64_t tocVA = in.got->getVA(); + uint64_t tocVA = ctx.in.got->getVA(); // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000 // thus permitting a full 64 Kbytes segment. Note that the glibc startup @@ -1155,7 +1155,7 @@ void PPC64::writePltHeader(uint8_t *buf) const { // The 'bcl' instruction will set the link register to the address of the // following instruction ('mflr r11'). Here we store the offset from that // instruction to the first entry in the GotPlt section. 
- int64_t gotPltOffset = in.gotPlt->getVA() - (in.plt->getVA() + 8); + int64_t gotPltOffset = ctx.in.gotPlt->getVA() - (ctx.in.plt->getVA() + 8); write64(buf + 52, gotPltOffset); } diff --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp index 2435864..2421890 100644 --- a/lld/ELF/Arch/RISCV.cpp +++ b/lld/ELF/Arch/RISCV.cpp @@ -207,9 +207,9 @@ void RISCV::writeGotHeader(uint8_t *buf) const { void RISCV::writeGotPlt(uint8_t *buf, const Symbol &s) const { if (config->is64) - write64le(buf, in.plt->getVA()); + write64le(buf, ctx.in.plt->getVA()); else - write32le(buf, in.plt->getVA()); + write32le(buf, ctx.in.plt->getVA()); } void RISCV::writeIgotPlt(uint8_t *buf, const Symbol &s) const { @@ -230,7 +230,7 @@ void RISCV::writePltHeader(uint8_t *buf) const { // srli t1, t1, (rv64?1:2); t1 = &.got.plt[i] - &.got.plt[0] // l[wd] t0, Wordsize(t0); t0 = link_map // jr t3 - uint32_t offset = in.gotPlt->getVA() - in.plt->getVA(); + uint32_t offset = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA(); uint32_t load = config->is64 ? LD : LW; write32le(buf + 0, utype(AUIPC, X_T2, hi20(offset))); write32le(buf + 4, rtype(SUB, X_T1, X_T1, X_T3)); @@ -1178,8 +1178,8 @@ mergeAttributesSection(const SmallVector<InputSectionBase *, 0> &sections) { unsigned firstStackAlignValue = 0, xlen = 0; bool hasArch = false; - in.riscvAttributes = std::make_unique<RISCVAttributesSection>(); - auto &merged = static_cast<RISCVAttributesSection &>(*in.riscvAttributes); + ctx.in.riscvAttributes = std::make_unique<RISCVAttributesSection>(); + auto &merged = static_cast<RISCVAttributesSection &>(*ctx.in.riscvAttributes); // Collect all tags values from attributes section. const auto &attributesTags = RISCVAttrs::getRISCVAttributeTags(); diff --git a/lld/ELF/Arch/SPARCV9.cpp b/lld/ELF/Arch/SPARCV9.cpp index 4ae742c..f7f296c 100644 --- a/lld/ELF/Arch/SPARCV9.cpp +++ b/lld/ELF/Arch/SPARCV9.cpp @@ -188,7 +188,7 @@ void SPARCV9::writePlt(uint8_t *buf, const Symbol & /*sym*/, }; memcpy(buf, pltData, sizeof(pltData)); - uint64_t off = pltEntryAddr - in.plt->getVA(); + uint64_t off = pltEntryAddr - ctx.in.plt->getVA(); relocateNoSym(buf, R_SPARC_22, off); relocateNoSym(buf + 4, R_SPARC_WDISP19, -(off + 4 - pltEntrySize)); } diff --git a/lld/ELF/Arch/SystemZ.cpp b/lld/ELF/Arch/SystemZ.cpp index 293df507..3d63747 100644 --- a/lld/ELF/Arch/SystemZ.cpp +++ b/lld/ELF/Arch/SystemZ.cpp @@ -203,15 +203,15 @@ void SystemZ::writePltHeader(uint8_t *buf) const { 0x07, 0x00, // nopr }; memcpy(buf, pltData, sizeof(pltData)); - uint64_t got = in.got->getVA(); - uint64_t plt = in.plt->getVA(); + uint64_t got = ctx.in.got->getVA(); + uint64_t plt = ctx.in.plt->getVA(); write32be(buf + 8, (got - plt - 6) >> 1); } void SystemZ::addPltHeaderSymbols(InputSection &isec) const { // The PLT header needs a reference to _GLOBAL_OFFSET_TABLE_, so we // must ensure the .got section is created even if otherwise unused.
- in.got->hasGotOffRel.store(true, std::memory_order_relaxed); + ctx.in.got->hasGotOffRel.store(true, std::memory_order_relaxed); } void SystemZ::writePlt(uint8_t *buf, const Symbol &sym, @@ -228,8 +228,8 @@ void SystemZ::writePlt(uint8_t *buf, const Symbol &sym, memcpy(buf, inst, sizeof(inst)); write32be(buf + 2, (sym.getGotPltVA() - pltEntryAddr) >> 1); - write32be(buf + 24, (in.plt->getVA() - pltEntryAddr - 22) >> 1); - write32be(buf + 28, in.relaPlt->entsize * sym.getPltIdx()); + write32be(buf + 24, (ctx.in.plt->getVA() - pltEntryAddr - 22) >> 1); + write32be(buf + 28, ctx.in.relaPlt->entsize * sym.getPltIdx()); } int64_t SystemZ::getImplicitAddend(const uint8_t *buf, RelType type) const { diff --git a/lld/ELF/Arch/X86.cpp b/lld/ELF/Arch/X86.cpp index 20b69ad..6fb3623 100644 --- a/lld/ELF/Arch/X86.cpp +++ b/lld/ELF/Arch/X86.cpp @@ -203,14 +203,14 @@ void X86::writePltHeader(uint8_t *buf) const { 0x90, 0x90, 0x90, 0x90, // nop }; memcpy(buf, pltData, sizeof(pltData)); - uint32_t gotPlt = in.gotPlt->getVA(); + uint32_t gotPlt = ctx.in.gotPlt->getVA(); write32le(buf + 2, gotPlt + 4); write32le(buf + 8, gotPlt + 8); } void X86::writePlt(uint8_t *buf, const Symbol &sym, uint64_t pltEntryAddr) const { - unsigned relOff = in.relaPlt->entsize * sym.getPltIdx(); + unsigned relOff = ctx.in.relaPlt->entsize * sym.getPltIdx(); if (config->isPic) { const uint8_t inst[] = { 0xff, 0xa3, 0, 0, 0, 0, // jmp *foo@GOT(%ebx) @@ -218,7 +218,7 @@ void X86::writePlt(uint8_t *buf, const Symbol &sym, 0xe9, 0, 0, 0, 0, // jmp .PLT0@PC }; memcpy(buf, inst, sizeof(inst)); - write32le(buf + 2, sym.getGotPltVA() - in.gotPlt->getVA()); + write32le(buf + 2, sym.getGotPltVA() - ctx.in.gotPlt->getVA()); } else { const uint8_t inst[] = { 0xff, 0x25, 0, 0, 0, 0, // jmp *foo@GOT @@ -230,7 +230,7 @@ void X86::writePlt(uint8_t *buf, const Symbol &sym, } write32le(buf + 7, relOff); - write32le(buf + 12, in.plt->getVA() - pltEntryAddr - 16); + write32le(buf + 12, ctx.in.plt->getVA() - pltEntryAddr - 16); } int64_t X86::getImplicitAddend(const uint8_t *buf, RelType type) const { @@ -532,7 +532,7 @@ IntelIBT::IntelIBT() { pltHeaderSize = 0; } void IntelIBT::writeGotPlt(uint8_t *buf, const Symbol &s) const { uint64_t va = - in.ibtPlt->getVA() + IBTPltHeaderSize + s.getPltIdx() * pltEntrySize; + ctx.in.ibtPlt->getVA() + IBTPltHeaderSize + s.getPltIdx() * pltEntrySize; write32le(buf, va); } @@ -545,7 +545,7 @@ void IntelIBT::writePlt(uint8_t *buf, const Symbol &sym, 0x66, 0x0f, 0x1f, 0x44, 0, 0, // nop }; memcpy(buf, inst, sizeof(inst)); - write32le(buf + 6, sym.getGotPltVA() - in.gotPlt->getVA()); + write32le(buf + 6, sym.getGotPltVA() - ctx.in.gotPlt->getVA()); return; } @@ -630,7 +630,7 @@ void RetpolinePic::writePltHeader(uint8_t *buf) const { void RetpolinePic::writePlt(uint8_t *buf, const Symbol &sym, uint64_t pltEntryAddr) const { - unsigned relOff = in.relaPlt->entsize * sym.getPltIdx(); + unsigned relOff = ctx.in.relaPlt->entsize * sym.getPltIdx(); const uint8_t insn[] = { 0x50, // pushl %eax 0x8b, 0x83, 0, 0, 0, 0, // mov foo@GOT(%ebx), %eax @@ -642,8 +642,8 @@ void RetpolinePic::writePlt(uint8_t *buf, const Symbol &sym, }; memcpy(buf, insn, sizeof(insn)); - uint32_t ebx = in.gotPlt->getVA(); - unsigned off = pltEntryAddr - in.plt->getVA(); + uint32_t ebx = ctx.in.gotPlt->getVA(); + unsigned off = pltEntryAddr - ctx.in.plt->getVA(); write32le(buf + 3, sym.getGotPltVA() - ebx); write32le(buf + 8, -off - 12 + 32); write32le(buf + 13, -off - 17 + 18); @@ -682,14 +682,14 @@ void RetpolineNoPic::writePltHeader(uint8_t *buf) 
const { }; memcpy(buf, insn, sizeof(insn)); - uint32_t gotPlt = in.gotPlt->getVA(); + uint32_t gotPlt = ctx.in.gotPlt->getVA(); write32le(buf + 2, gotPlt + 4); write32le(buf + 8, gotPlt + 8); } void RetpolineNoPic::writePlt(uint8_t *buf, const Symbol &sym, uint64_t pltEntryAddr) const { - unsigned relOff = in.relaPlt->entsize * sym.getPltIdx(); + unsigned relOff = ctx.in.relaPlt->entsize * sym.getPltIdx(); const uint8_t insn[] = { 0x50, // 0: pushl %eax 0xa1, 0, 0, 0, 0, // 1: mov foo_in_GOT, %eax @@ -702,7 +702,7 @@ void RetpolineNoPic::writePlt(uint8_t *buf, const Symbol &sym, }; memcpy(buf, insn, sizeof(insn)); - unsigned off = pltEntryAddr - in.plt->getVA(); + unsigned off = pltEntryAddr - ctx.in.plt->getVA(); write32le(buf + 2, sym.getGotPltVA()); write32le(buf + 7, -off - 11 + 32); write32le(buf + 12, -off - 16 + 17); diff --git a/lld/ELF/Arch/X86_64.cpp b/lld/ELF/Arch/X86_64.cpp index 65a81fe..950bac8 100644 --- a/lld/ELF/Arch/X86_64.cpp +++ b/lld/ELF/Arch/X86_64.cpp @@ -432,8 +432,8 @@ void X86_64::writePltHeader(uint8_t *buf) const { 0x0f, 0x1f, 0x40, 0x00, // nop }; memcpy(buf, pltData, sizeof(pltData)); - uint64_t gotPlt = in.gotPlt->getVA(); - uint64_t plt = in.ibtPlt ? in.ibtPlt->getVA() : in.plt->getVA(); + uint64_t gotPlt = ctx.in.gotPlt->getVA(); + uint64_t plt = ctx.in.ibtPlt ? ctx.in.ibtPlt->getVA() : ctx.in.plt->getVA(); write32le(buf + 2, gotPlt - plt + 2); // GOTPLT+8 write32le(buf + 8, gotPlt - plt + 4); // GOTPLT+16 } @@ -449,7 +449,7 @@ void X86_64::writePlt(uint8_t *buf, const Symbol &sym, write32le(buf + 2, sym.getGotPltVA() - pltEntryAddr - 6); write32le(buf + 7, sym.getPltIdx()); - write32le(buf + 12, in.plt->getVA() - pltEntryAddr - 16); + write32le(buf + 12, ctx.in.plt->getVA() - pltEntryAddr - 16); } RelType X86_64::getDynRel(RelType type) const { @@ -1073,7 +1073,7 @@ IntelIBT::IntelIBT() { pltHeaderSize = 0; } void IntelIBT::writeGotPlt(uint8_t *buf, const Symbol &s) const { uint64_t va = - in.ibtPlt->getVA() + IBTPltHeaderSize + s.getPltIdx() * pltEntrySize; + ctx.in.ibtPlt->getVA() + IBTPltHeaderSize + s.getPltIdx() * pltEntrySize; write64le(buf, va); } @@ -1162,8 +1162,8 @@ void Retpoline::writePltHeader(uint8_t *buf) const { }; memcpy(buf, insn, sizeof(insn)); - uint64_t gotPlt = in.gotPlt->getVA(); - uint64_t plt = in.plt->getVA(); + uint64_t gotPlt = ctx.in.gotPlt->getVA(); + uint64_t plt = ctx.in.plt->getVA(); write32le(buf + 2, gotPlt - plt - 6 + 8); write32le(buf + 9, gotPlt - plt - 13 + 16); } @@ -1180,7 +1180,7 @@ void Retpoline::writePlt(uint8_t *buf, const Symbol &sym, }; memcpy(buf, insn, sizeof(insn)); - uint64_t off = pltEntryAddr - in.plt->getVA(); + uint64_t off = pltEntryAddr - ctx.in.plt->getVA(); write32le(buf + 3, sym.getGotPltVA() - pltEntryAddr - 7); write32le(buf + 8, -off - 12 + 32); @@ -1221,7 +1221,7 @@ void RetpolineZNow::writePlt(uint8_t *buf, const Symbol &sym, memcpy(buf, insn, sizeof(insn)); write32le(buf + 3, sym.getGotPltVA() - pltEntryAddr - 7); - write32le(buf + 8, in.plt->getVA() - pltEntryAddr - 12); + write32le(buf + 8, ctx.in.plt->getVA() - pltEntryAddr - 12); } static TargetInfo *getTargetInfo() { diff --git a/lld/ELF/Config.h b/lld/ELF/Config.h index fd40ec9..11bf0ec 100644 --- a/lld/ELF/Config.h +++ b/lld/ELF/Config.h @@ -51,6 +51,26 @@ class TargetInfo; struct Partition; struct PhdrEntry; +class BssSection; +class GdbIndexSection; +class GotPltSection; +class GotSection; +class IBTPltSection; +class IgotPltSection; +class InputSection; +class IpltSection; +class MipsGotSection; +class MipsRldMapSection; +class 
PPC32Got2Section; +class PPC64LongBranchTargetSection; +class PltSection; +class RelocationBaseSection; +class RelroPaddingSection; +class StringTableSection; +class SymbolTableBaseSection; +class SymtabShndxSection; +class SyntheticSection; + enum ELFKind : uint8_t { ELFNoneKind, ELF32LEKind, @@ -483,6 +503,42 @@ struct DuplicateSymbol { uint64_t value; }; +// Linker generated sections which can be used as inputs and are not specific to +// a partition. +struct InStruct { + std::unique_ptr<InputSection> attributes; + std::unique_ptr<SyntheticSection> riscvAttributes; + std::unique_ptr<BssSection> bss; + std::unique_ptr<BssSection> bssRelRo; + std::unique_ptr<GotSection> got; + std::unique_ptr<GotPltSection> gotPlt; + std::unique_ptr<IgotPltSection> igotPlt; + std::unique_ptr<RelroPaddingSection> relroPadding; + std::unique_ptr<SyntheticSection> armCmseSGSection; + std::unique_ptr<PPC64LongBranchTargetSection> ppc64LongBranchTarget; + std::unique_ptr<SyntheticSection> mipsAbiFlags; + std::unique_ptr<MipsGotSection> mipsGot; + std::unique_ptr<SyntheticSection> mipsOptions; + std::unique_ptr<SyntheticSection> mipsReginfo; + std::unique_ptr<MipsRldMapSection> mipsRldMap; + std::unique_ptr<SyntheticSection> partEnd; + std::unique_ptr<SyntheticSection> partIndex; + std::unique_ptr<PltSection> plt; + std::unique_ptr<IpltSection> iplt; + std::unique_ptr<PPC32Got2Section> ppc32Got2; + std::unique_ptr<IBTPltSection> ibtPlt; + std::unique_ptr<RelocationBaseSection> relaPlt; + // Non-SHF_ALLOC sections + std::unique_ptr<SyntheticSection> debugNames; + std::unique_ptr<GdbIndexSection> gdbIndex; + std::unique_ptr<StringTableSection> shStrTab; + std::unique_ptr<StringTableSection> strTab; + std::unique_ptr<SymbolTableBaseSection> symTab; + std::unique_ptr<SymtabShndxSection> symTabShndx; + + void reset(); +}; + struct Ctx { LinkerDriver driver; LinkerScript *script; @@ -502,6 +558,9 @@ struct Ctx { }; OutSections out; SmallVector<OutputSection *, 0> outputSections; + std::vector<Partition> partitions; + + InStruct in; // Some linker-generated symbols need to be created as // Defined symbols. 
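The Config.h hunk above is the core of this refactor: the linker-generated sections that used to live in a global InStruct (and the global partitions vector) become members of Ctx, so per-link state is reset by resetting one object. A reduced, compilable sketch of the resulting access pattern, using stand-in types rather than lld's real classes:

#include <cstdint>
#include <memory>
#include <vector>

struct PltSection { uint64_t getVA() const { return 0x401000; } }; // stand-in
struct Partition {};                                               // stand-in

struct InStruct { std::unique_ptr<PltSection> plt; };

struct Ctx {
  InStruct in;                       // previously a global: InStruct in;
  std::vector<Partition> partitions; // previously a global vector
};
Ctx ctx;

uint64_t pltVA() {
  return ctx.in.plt->getVA(); // pre-refactor spelling: in.plt->getVA()
}

int main() {
  ctx.in.plt = std::make_unique<PltSection>();
  return pltVA() == 0;
}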
diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp index 37460a7..34b1467 100644 --- a/lld/ELF/Driver.cpp +++ b/lld/ELF/Driver.cpp @@ -101,7 +101,9 @@ void Ctx::reset() { tlsPhdr = nullptr; out = OutSections{}; outputSections.clear(); + partitions.clear(); + ctx.in.reset(); sym = ElfSym{}; memoryBuffers.clear(); @@ -148,13 +150,9 @@ bool link(ArrayRef<const char *> args, llvm::raw_ostream &stdoutOS, ctx->e.initialize(stdoutOS, stderrOS, exitEarly, disableOutput); ctx->e.cleanupCallback = []() { elf::ctx.reset(); + elf::ctx.partitions.emplace_back(); symtab = SymbolTable(); - in.reset(); - - partitions.clear(); - partitions.emplace_back(); - SharedFile::vernauxNum = 0; }; ctx->e.logName = args::getFilenameWithoutExe(args[0]); @@ -167,8 +165,8 @@ bool link(ArrayRef<const char *> args, llvm::raw_ostream &stdoutOS, elf::ctx.script = &script; elf::ctx.symAux.emplace_back(); - partitions.clear(); - partitions.emplace_back(); + elf::ctx.partitions.clear(); + elf::ctx.partitions.emplace_back(); config->progName = args[0]; @@ -1216,7 +1214,6 @@ static void parseClangOption(StringRef opt, const Twine &msg) { const char *argv[] = {config->progName.data(), opt.data()}; if (cl::ParseCommandLineOptions(2, argv, "", &os)) return; - os.flush(); error(msg + ": " + StringRef(err).trim()); } @@ -2449,7 +2446,7 @@ static void readSymbolPartitionSection(InputSectionBase *s) { return; StringRef partName = reinterpret_cast<const char *>(s->content().data()); - for (Partition &part : partitions) { + for (Partition &part : ctx.partitions) { if (part.name == partName) { sym->partition = part.getNumber(); return; @@ -2474,11 +2471,11 @@ static void readSymbolPartitionSection(InputSectionBase *s) { // Impose a limit of no more than 254 partitions. This limit comes from the // sizes of the Partition fields in InputSectionBase and Symbol, as well as // the amount of space devoted to the partition number in RankFlags. - if (partitions.size() == 254) + if (ctx.partitions.size() == 254) fatal("may not have more than 254 partitions"); - partitions.emplace_back(); - Partition &newPart = partitions.back(); + ctx.partitions.emplace_back(); + Partition &newPart = ctx.partitions.back(); newPart.name = partName; sym->partition = newPart.getNumber(); } @@ -3098,7 +3095,7 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) { // Now that the number of partitions is fixed, save a pointer to the main // partition. - ctx.mainPart = &partitions[0]; + ctx.mainPart = &ctx.partitions[0]; // Read .note.gnu.property sections from input object files which // contain a hint to tweak linker's and loader's behaviors. diff --git a/lld/ELF/ICF.cpp b/lld/ELF/ICF.cpp index 92b3bbb..14e0afc 100644 --- a/lld/ELF/ICF.cpp +++ b/lld/ELF/ICF.cpp @@ -480,7 +480,7 @@ template <class ELFT> void ICF<ELFT>::run() { // If two .gcc_except_table have identical semantics (usually identical // content with PC-relative encoding), we will lose folding opportunity. uint32_t uniqueId = 0; - for (Partition &part : partitions) + for (Partition &part : ctx.partitions) part.ehFrame->iterateFDEWithLSDA<ELFT>( [&](InputSection &s) { s.eqClass[0] = s.eqClass[1] = ++uniqueId; }); diff --git a/lld/ELF/InputFiles.cpp b/lld/ELF/InputFiles.cpp index db52017..3f5f295 100644 --- a/lld/ELF/InputFiles.cpp +++ b/lld/ELF/InputFiles.cpp @@ -638,9 +638,9 @@ template <class ELFT> void ObjFile<ELFT>::parse(bool ignoreComdats) { // dynamic loaders require the presence of an attribute section for // dlopen to work. 
In a full implementation we would merge all attribute // sections. - if (in.attributes == nullptr) { - in.attributes = std::make_unique<InputSection>(*this, sec, name); - this->sections[i] = in.attributes.get(); + if (ctx.in.attributes == nullptr) { + ctx.in.attributes = std::make_unique<InputSection>(*this, sec, name); + this->sections[i] = ctx.in.attributes.get(); } } } @@ -1746,11 +1746,11 @@ createBitcodeSymbol(Symbol *&sym, const std::vector<bool> &keptComdats, if (!sym) { // Symbols can be duplicated in bitcode files because of '#include' and - // linkonce_odr. Use unique_saver to save symbol names for de-duplication. + // linkonce_odr. Use uniqueSaver to save symbol names for de-duplication. // Update objSym.Name to reference (via StringRef) the string saver's copy; // this way LTO can reference the same string saver's copy rather than // keeping copies of its own. - objSym.Name = unique_saver().save(objSym.getName()); + objSym.Name = uniqueSaver().save(objSym.getName()); sym = symtab.insert(objSym.getName()); } @@ -1804,11 +1804,11 @@ void BitcodeFile::parseLazy() { symbols = std::make_unique<Symbol *[]>(numSymbols); for (auto [i, irSym] : llvm::enumerate(obj->symbols())) { // Symbols can be duplicated in bitcode files because of '#include' and - // linkonce_odr. Use unique_saver to save symbol names for de-duplication. + // linkonce_odr. Use uniqueSaver to save symbol names for de-duplication. // Update objSym.Name to reference (via StringRef) the string saver's copy; // this way LTO can reference the same string saver's copy rather than // keeping copies of its own. - irSym.Name = unique_saver().save(irSym.getName()); + irSym.Name = uniqueSaver().save(irSym.getName()); if (!irSym.isUndefined()) { auto *sym = symtab.insert(irSym.getName()); sym->resolve(LazySymbol{*this}); diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp index 03b9180..9601e6b3 100644 --- a/lld/ELF/InputSection.cpp +++ b/lld/ELF/InputSection.cpp @@ -452,7 +452,7 @@ void InputSection::copyRelocations(uint8_t *buf, // Output section VA is zero for -r, so r_offset is an offset within the // section, but for --emit-relocs it is a virtual address. p->r_offset = sec->getVA(rel.offset); - p->setSymbolAndType(in.symTab->getSymbolIndex(sym), type, + p->setSymbolAndType(ctx.in.symTab->getSymbolIndex(sym), type, config->isMips64EL); if (sym.type == STT_SECTION) { @@ -744,20 +744,20 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type, // so we have to duplicate some logic here. if (sym.hasFlag(NEEDS_TLSGD) && type != R_LARCH_TLS_IE_PC_LO12) // Like R_LOONGARCH_TLSGD_PAGE_PC but taking the absolute value. 
- return in.got->getGlobalDynAddr(sym) + a; + return ctx.in.got->getGlobalDynAddr(sym) + a; return getRelocTargetVA(file, type, a, p, sym, R_GOT); case R_GOTONLY_PC: - return in.got->getVA() + a - p; + return ctx.in.got->getVA() + a - p; case R_GOTPLTONLY_PC: - return in.gotPlt->getVA() + a - p; + return ctx.in.gotPlt->getVA() + a - p; case R_GOTREL: case R_PPC64_RELAX_TOC: - return sym.getVA(a) - in.got->getVA(); + return sym.getVA(a) - ctx.in.got->getVA(); case R_GOTPLTREL: - return sym.getVA(a) - in.gotPlt->getVA(); + return sym.getVA(a) - ctx.in.gotPlt->getVA(); case R_GOTPLT: case R_RELAX_TLS_GD_TO_IE_GOTPLT: - return sym.getGotVA() + a - in.gotPlt->getVA(); + return sym.getGotVA() + a - ctx.in.gotPlt->getVA(); case R_TLSLD_GOT_OFF: case R_GOT_OFF: case R_RELAX_TLS_GD_TO_IE_GOT_OFF: @@ -766,22 +766,23 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type, case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC: return getAArch64Page(sym.getGotVA() + a) - getAArch64Page(p); case R_AARCH64_GOT_PAGE: - return sym.getGotVA() + a - getAArch64Page(in.got->getVA()); + return sym.getGotVA() + a - getAArch64Page(ctx.in.got->getVA()); case R_GOT_PC: case R_RELAX_TLS_GD_TO_IE: return sym.getGotVA() + a - p; case R_GOTPLT_GOTREL: - return sym.getGotPltVA() + a - in.got->getVA(); + return sym.getGotPltVA() + a - ctx.in.got->getVA(); case R_GOTPLT_PC: return sym.getGotPltVA() + a - p; case R_LOONGARCH_GOT_PAGE_PC: if (sym.hasFlag(NEEDS_TLSGD)) - return getLoongArchPageDelta(in.got->getGlobalDynAddr(sym) + a, p, type); + return getLoongArchPageDelta(ctx.in.got->getGlobalDynAddr(sym) + a, p, + type); return getLoongArchPageDelta(sym.getGotVA() + a, p, type); case R_MIPS_GOTREL: - return sym.getVA(a) - in.mipsGot->getGp(file); + return sym.getVA(a) - ctx.in.mipsGot->getGp(file); case R_MIPS_GOT_GP: - return in.mipsGot->getGp(file) + a; + return ctx.in.mipsGot->getGp(file) + a; case R_MIPS_GOT_GP_PC: { // R_MIPS_LO16 expression has R_MIPS_GOT_GP_PC type iif the target // is _gp_disp symbol. In that case we should use the following @@ -790,7 +791,7 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type, // microMIPS variants of these relocations use slightly different // expressions: AHL + GP - P + 3 for %lo() and AHL + GP - P - 1 for %hi() // to correctly handle less-significant bit of the microMIPS symbol. - uint64_t v = in.mipsGot->getGp(file) + a - p; + uint64_t v = ctx.in.mipsGot->getGp(file) + a - p; if (type == R_MIPS_LO16 || type == R_MICROMIPS_LO16) v += 4; if (type == R_MICROMIPS_LO16 || type == R_MICROMIPS_HI16) @@ -801,21 +802,24 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type, // If relocation against MIPS local symbol requires GOT entry, this entry // should be initialized by 'page address'. This address is high 16-bits // of sum the symbol's value and the addend. - return in.mipsGot->getVA() + in.mipsGot->getPageEntryOffset(file, sym, a) - - in.mipsGot->getGp(file); + return ctx.in.mipsGot->getVA() + + ctx.in.mipsGot->getPageEntryOffset(file, sym, a) - + ctx.in.mipsGot->getGp(file); case R_MIPS_GOT_OFF: case R_MIPS_GOT_OFF32: // In case of MIPS if a GOT relocation has non-zero addend this addend // should be applied to the GOT entry content not to the GOT entry offset. // That is why we use separate expression type. 
- return in.mipsGot->getVA() + in.mipsGot->getSymEntryOffset(file, sym, a) - - in.mipsGot->getGp(file); + return ctx.in.mipsGot->getVA() + + ctx.in.mipsGot->getSymEntryOffset(file, sym, a) - + ctx.in.mipsGot->getGp(file); case R_MIPS_TLSGD: - return in.mipsGot->getVA() + in.mipsGot->getGlobalDynOffset(file, sym) - - in.mipsGot->getGp(file); + return ctx.in.mipsGot->getVA() + + ctx.in.mipsGot->getGlobalDynOffset(file, sym) - + ctx.in.mipsGot->getGp(file); case R_MIPS_TLSLD: - return in.mipsGot->getVA() + in.mipsGot->getTlsIndexOffset(file) - - in.mipsGot->getGp(file); + return ctx.in.mipsGot->getVA() + ctx.in.mipsGot->getTlsIndexOffset(file) - + ctx.in.mipsGot->getGp(file); case R_AARCH64_PAGE_PC: { uint64_t val = sym.isUndefWeak() ? p + a : sym.getVA(a); return getAArch64Page(val) - getAArch64Page(p); @@ -864,9 +868,9 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type, case R_LOONGARCH_PLT_PAGE_PC: return getLoongArchPageDelta(sym.getPltVA() + a, p, type); case R_PLT_GOTPLT: - return sym.getPltVA() + a - in.gotPlt->getVA(); + return sym.getPltVA() + a - ctx.in.gotPlt->getVA(); case R_PLT_GOTREL: - return sym.getPltVA() + a - in.got->getVA(); + return sym.getPltVA() + a - ctx.in.got->getVA(); case R_PPC32_PLTREL: // R_PPC_PLTREL24 uses the addend (usually 0 or 0x8000) to indicate r30 // stores _GLOBAL_OFFSET_TABLE_ or .got2+0x8000. The addend is ignored for @@ -912,29 +916,32 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type, case R_SIZE: return sym.getSize() + a; case R_TLSDESC: - return in.got->getTlsDescAddr(sym) + a; + return ctx.in.got->getTlsDescAddr(sym) + a; case R_TLSDESC_PC: - return in.got->getTlsDescAddr(sym) + a - p; + return ctx.in.got->getTlsDescAddr(sym) + a - p; case R_TLSDESC_GOTPLT: - return in.got->getTlsDescAddr(sym) + a - in.gotPlt->getVA(); + return ctx.in.got->getTlsDescAddr(sym) + a - ctx.in.gotPlt->getVA(); case R_AARCH64_TLSDESC_PAGE: - return getAArch64Page(in.got->getTlsDescAddr(sym) + a) - getAArch64Page(p); + return getAArch64Page(ctx.in.got->getTlsDescAddr(sym) + a) - + getAArch64Page(p); case R_LOONGARCH_TLSDESC_PAGE_PC: - return getLoongArchPageDelta(in.got->getTlsDescAddr(sym) + a, p, type); + return getLoongArchPageDelta(ctx.in.got->getTlsDescAddr(sym) + a, p, type); case R_TLSGD_GOT: - return in.got->getGlobalDynOffset(sym) + a; + return ctx.in.got->getGlobalDynOffset(sym) + a; case R_TLSGD_GOTPLT: - return in.got->getGlobalDynAddr(sym) + a - in.gotPlt->getVA(); + return ctx.in.got->getGlobalDynAddr(sym) + a - ctx.in.gotPlt->getVA(); case R_TLSGD_PC: - return in.got->getGlobalDynAddr(sym) + a - p; + return ctx.in.got->getGlobalDynAddr(sym) + a - p; case R_LOONGARCH_TLSGD_PAGE_PC: - return getLoongArchPageDelta(in.got->getGlobalDynAddr(sym) + a, p, type); + return getLoongArchPageDelta(ctx.in.got->getGlobalDynAddr(sym) + a, p, + type); case R_TLSLD_GOTPLT: - return in.got->getVA() + in.got->getTlsIndexOff() + a - in.gotPlt->getVA(); + return ctx.in.got->getVA() + ctx.in.got->getTlsIndexOff() + a - + ctx.in.gotPlt->getVA(); case R_TLSLD_GOT: - return in.got->getTlsIndexOff() + a; + return ctx.in.got->getTlsIndexOff() + a; case R_TLSLD_PC: - return in.got->getTlsIndexVA() + a - p; + return ctx.in.got->getTlsIndexVA() + a - p; default: llvm_unreachable("invalid expression"); } diff --git a/lld/ELF/InputSection.h b/lld/ELF/InputSection.h index 60c8d57..f7672bb 100644 --- a/lld/ELF/InputSection.h +++ b/lld/ELF/InputSection.h @@ -33,8 +33,6 @@ class SyntheticSection; template <class ELFT> class 
ObjFile; class OutputSection; -LLVM_LIBRARY_VISIBILITY extern std::vector<Partition> partitions; - // Returned by InputSectionBase::relsOrRelas. At most one member is empty. template <class ELFT> struct RelsOrRelas { Relocs<typename ELFT::Rel> rels; diff --git a/lld/ELF/LinkerScript.cpp b/lld/ELF/LinkerScript.cpp index 8bab26c..3e8f375 100644 --- a/lld/ELF/LinkerScript.cpp +++ b/lld/ELF/LinkerScript.cpp @@ -649,7 +649,7 @@ LinkerScript::computeInputSections(const InputSectionDescription *cmd, } void LinkerScript::discard(InputSectionBase &s) { - if (&s == in.shStrTab.get()) + if (&s == ctx.in.shStrTab.get()) error("discarding " + s.name + " section is not allowed"); s.markDead(); @@ -659,7 +659,7 @@ void LinkerScript::discard(InputSectionBase &s) { } void LinkerScript::discardSynthetic(OutputSection &outCmd) { - for (Partition &part : partitions) { + for (Partition &part : ctx.partitions) { if (!part.armExidx || !part.armExidx->isLive()) continue; SmallVector<InputSectionBase *, 0> secs( @@ -1038,7 +1038,7 @@ void LinkerScript::diagnoseOrphanHandling() const { for (const InputSectionBase *sec : orphanSections) { // .relro_padding is inserted before DATA_SEGMENT_RELRO_END, if present, // automatically. The section is not supposed to be specified by scripts. - if (sec == in.relroPadding.get()) + if (sec == ctx.in.relroPadding.get()) continue; // Input SHT_REL[A] retained by --emit-relocs are ignored by // computeInputSections(). Don't warn/error. @@ -1055,7 +1055,7 @@ void LinkerScript::diagnoseOrphanHandling() const { } void LinkerScript::diagnoseMissingSGSectionAddress() const { - if (!config->cmseImplib || !in.armCmseSGSection->isNeeded()) + if (!config->cmseImplib || !ctx.in.armCmseSGSection->isNeeded()) return; OutputSection *sec = findByName(sectionCommands, ".gnu.sgstubs"); @@ -1237,7 +1237,7 @@ bool LinkerScript::assignOffsets(OutputSection *sec) { // If .relro_padding is present, round up the end to a common-page-size // boundary to protect the last page. - if (in.relroPadding && sec == in.relroPadding->getParent()) + if (ctx.in.relroPadding && sec == ctx.in.relroPadding->getParent()) expandOutputSection(alignToPowerOf2(dot, config->commonPageSize) - dot); // Non-SHF_ALLOC sections do not affect the addresses of other OutputSections @@ -1361,7 +1361,8 @@ void LinkerScript::adjustOutputSections() { // Discard .relro_padding if we have not seen one RELRO section. Note: when // .tbss is the only RELRO section, there is no associated PT_LOAD segment // (needsPtLoad), so we don't append .relro_padding in the case. - if (in.relroPadding && in.relroPadding->getParent() == sec && !seenRelro) + if (ctx.in.relroPadding && ctx.in.relroPadding->getParent() == sec && + !seenRelro) discardable = true; if (discardable) { sec->markDead(); diff --git a/lld/ELF/MarkLive.cpp b/lld/ELF/MarkLive.cpp index 56ff53f..f11d7f5 100644 --- a/lld/ELF/MarkLive.cpp +++ b/lld/ELF/MarkLive.cpp @@ -377,13 +377,13 @@ template <class ELFT> void elf::markLive() { sec->markDead(); // Follow the graph to mark all live sections. - for (unsigned curPart = 1; curPart <= partitions.size(); ++curPart) - MarkLive<ELFT>(curPart).run(); + for (unsigned i = 1, e = ctx.partitions.size(); i <= e; ++i) + MarkLive<ELFT>(i).run(); // If we have multiple partitions, some sections need to live in the main // partition even if they were allocated to a loadable partition. Move them // there now. - if (partitions.size() != 1) + if (ctx.partitions.size() != 1) MarkLive<ELFT>(1).moveToMain(); // Report garbage-collected sections. 
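Note how the rewritten MarkLive loop keeps lld's 1-based partition numbering: ctx.partitions[0] is the main partition and carries number 1, so the pass iterates i = 1 .. size() inclusive. A stand-in sketch of that indexing convention (not lld's MarkLive itself):

#include <cstdio>
#include <vector>

struct Partition {};
std::vector<Partition> partitions{Partition{}}; // [0] is the main partition

// Stand-in for MarkLive<ELFT>(i).run(): partition *numbers* start at 1.
void markPartition(unsigned number) {
  std::printf("marking partition %u (index %u)\n", number, number - 1);
}

int main() {
  for (unsigned i = 1, e = partitions.size(); i <= e; ++i)
    markPartition(i);
}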
diff --git a/lld/ELF/OutputSections.cpp b/lld/ELF/OutputSections.cpp index cb17e10..1aede8df 100644 --- a/lld/ELF/OutputSections.cpp +++ b/lld/ELF/OutputSections.cpp @@ -22,6 +22,7 @@ #include "llvm/Support/Parallel.h" #include "llvm/Support/Path.h" #include "llvm/Support/TimeProfiler.h" +#undef in #if LLVM_ENABLE_ZLIB // Avoid introducing max as a macro from Windows headers. #define NOMINMAX @@ -584,7 +585,7 @@ void OutputSection::writeTo(uint8_t *buf, parallel::TaskGroup &tg) { static void finalizeShtGroup(OutputSection *os, InputSection *section) { // sh_link field for SHT_GROUP sections should contain the section index of // the symbol table. - os->link = in.symTab->getParent()->sectionIndex; + os->link = ctx.in.symTab->getParent()->sectionIndex; if (!section) return; @@ -592,7 +593,7 @@ static void finalizeShtGroup(OutputSection *os, InputSection *section) { // sh_info then contain index of an entry in symbol table section which // provides signature of the section group. ArrayRef<Symbol *> symbols = section->file->getSymbols(); - os->info = in.symTab->getSymbolIndex(*symbols[section->info]); + os->info = ctx.in.symTab->getSymbolIndex(*symbols[section->info]); // Some group members may be combined or discarded, so we need to compute the // new size. The content will be rewritten in InputSection::copyShtGroup. @@ -610,7 +611,7 @@ encodeOneCrel(raw_svector_ostream &os, Elf_Crel<sizeof(uint) == 8> &out, uint offset, const Symbol &sym, uint32_t type, uint addend) { const auto deltaOffset = static_cast<uint64_t>(offset - out.r_offset); out.r_offset = offset; - int64_t symidx = in.symTab->getSymbolIndex(sym); + int64_t symidx = ctx.in.symTab->getSymbolIndex(sym); if (sym.type == STT_SECTION) { auto *d = dyn_cast<Defined>(&sym); if (d) { @@ -731,7 +732,7 @@ void OutputSection::finalize() { if (!first || isa<SyntheticSection>(first)) return; - link = in.symTab->getParent()->sectionIndex; + link = ctx.in.symTab->getParent()->sectionIndex; // sh_info for SHT_REL[A] sections should contain the section header index of // the section to which the relocation applies. InputSectionBase *s = first->getRelocatedSection(); @@ -881,8 +882,8 @@ void OutputSection::checkDynRelAddends(const uint8_t *bufStart) { // Some targets have NOBITS synthetic sections with dynamic relocations // with non-zero addends. Skip such sections. if (is_contained({EM_PPC, EM_PPC64}, config->emachine) && - (rel.inputSec == in.ppc64LongBranchTarget.get() || - rel.inputSec == in.igotPlt.get())) + (rel.inputSec == ctx.in.ppc64LongBranchTarget.get() || + rel.inputSec == ctx.in.igotPlt.get())) continue; const uint8_t *relocTarget = bufStart + relOsec->offset + rel.inputSec->getOffset(rel.offsetInSec); diff --git a/lld/ELF/Relocations.cpp b/lld/ELF/Relocations.cpp index e5f58f1..6c07051 100644 --- a/lld/ELF/Relocations.cpp +++ b/lld/ELF/Relocations.cpp @@ -382,7 +382,7 @@ template <class ELFT> static void addCopyRelSymbol(SharedSymbol &ss) { bool isRO = isReadOnly<ELFT>(ss); BssSection *sec = make<BssSection>(isRO ? ".bss.rel.ro" : ".bss", symSize, ss.alignment); - OutputSection *osec = (isRO ? in.bssRelRo : in.bss)->getParent(); + OutputSection *osec = (isRO ? ctx.in.bssRelRo : ctx.in.bss)->getParent(); // At this point, sectionBases has been migrated to sections. Append sec to // sections. 
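One hunk worth pausing on above is addCopyRelSymbol: a copy relocation clones a shared library symbol's storage into the executable, and the clone lands in .bss.rel.ro rather than .bss when the original lives in read-only memory, so the page can be remapped read-only once relocations are applied. A tiny illustrative sketch of that choice (pickCopyRelSection is a made-up name; lld inlines the decision):

#include <string>

struct BssSection { std::string name; }; // stand-in for lld's BssSection
BssSection bss{".bss"}, bssRelRo{".bss.rel.ro"};

BssSection &pickCopyRelSection(bool symbolIsReadOnly) {
  // Mirrors: (isRO ? ctx.in.bssRelRo : ctx.in.bss)->getParent()
  return symbolIsReadOnly ? bssRelRo : bss;
}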
@@ -922,12 +922,12 @@ static void addPltEntry(PltSection &plt, GotPltSection &gotPlt, } void elf::addGotEntry(Symbol &sym) { - in.got->addEntry(sym); + ctx.in.got->addEntry(sym); uint64_t off = sym.getGotOffset(); // If preemptible, emit a GLOB_DAT relocation. if (sym.isPreemptible) { - ctx.mainPart->relaDyn->addReloc({ctx.target->gotRel, in.got.get(), off, + ctx.mainPart->relaDyn->addReloc({ctx.target->gotRel, ctx.in.got.get(), off, DynamicReloc::AgainstSymbol, sym, 0, R_ABS}); return; @@ -936,20 +936,20 @@ void elf::addGotEntry(Symbol &sym) { // Otherwise, the value is either a link-time constant or the load base // plus a constant. if (!config->isPic || isAbsolute(sym)) - in.got->addConstant({R_ABS, ctx.target->symbolicRel, off, 0, &sym}); + ctx.in.got->addConstant({R_ABS, ctx.target->symbolicRel, off, 0, &sym}); else - addRelativeReloc(*in.got, off, sym, 0, R_ABS, ctx.target->symbolicRel); + addRelativeReloc(*ctx.in.got, off, sym, 0, R_ABS, ctx.target->symbolicRel); } static void addTpOffsetGotEntry(Symbol &sym) { - in.got->addEntry(sym); + ctx.in.got->addEntry(sym); uint64_t off = sym.getGotOffset(); if (!sym.isPreemptible && !config->shared) { - in.got->addConstant({R_TPREL, ctx.target->symbolicRel, off, 0, &sym}); + ctx.in.got->addConstant({R_TPREL, ctx.target->symbolicRel, off, 0, &sym}); return; } ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible( - ctx.target->tlsGotRel, *in.got, off, sym, ctx.target->symbolicRel); + ctx.target->tlsGotRel, *ctx.in.got, off, sym, ctx.target->symbolicRel); } // Return true if we can define a symbol in the executable that @@ -1077,7 +1077,7 @@ void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset, // If the target adjusted the expression to R_RELAX_GOT_PC, we may end up // needing the GOT if we can't relax everything. if (expr == R_RELAX_GOT_PC) - in.got->hasGotOffRel.store(true, std::memory_order_relaxed); + ctx.in.got->hasGotOffRel.store(true, std::memory_order_relaxed); } } @@ -1100,7 +1100,7 @@ void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset, // See "Global Offset Table" in Chapter 5 in the following document // for detailed description: // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf - in.mipsGot->addEntry(*sec->file, sym, addend, expr); + ctx.in.mipsGot->addEntry(*sec->file, sym, addend, expr); } else if (!sym.isTls() || config->emachine != EM_LOONGARCH) { // Many LoongArch TLS relocs reuse the R_LOONGARCH_GOT type, in which // case the NEEDS_GOT flag shouldn't get set. @@ -1190,7 +1190,7 @@ void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset, // a dynamic relocation. 
// ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf p.4-19 if (config->emachine == EM_MIPS) - in.mipsGot->addEntry(*sec->file, sym, addend, expr); + ctx.in.mipsGot->addEntry(*sec->file, sym, addend, expr); return; } } @@ -1275,12 +1275,12 @@ static unsigned handleMipsTlsRelocation(RelType type, Symbol &sym, InputSectionBase &c, uint64_t offset, int64_t addend, RelExpr expr) { if (expr == R_MIPS_TLSLD) { - in.mipsGot->addTlsIndex(*c.file); + ctx.in.mipsGot->addTlsIndex(*c.file); c.addReloc({expr, type, offset, addend, &sym}); return 1; } if (expr == R_MIPS_TLSGD) { - in.mipsGot->addDynTlsEntry(*c.file, sym); + ctx.in.mipsGot->addDynTlsEntry(*c.file, sym); c.addReloc({expr, type, offset, addend, &sym}); return 1; } @@ -1526,10 +1526,10 @@ void RelocationScanner::scanOne(typename Relocs<RelTy>::const_iterator &i) { // The 5 types that relative GOTPLT are all x86 and x86-64 specific. if (oneof<R_GOTPLTONLY_PC, R_GOTPLTREL, R_GOTPLT, R_PLT_GOTPLT, R_TLSDESC_GOTPLT, R_TLSGD_GOTPLT>(expr)) { - in.gotPlt->hasGotPltOffRel.store(true, std::memory_order_relaxed); + ctx.in.gotPlt->hasGotPltOffRel.store(true, std::memory_order_relaxed); } else if (oneof<R_GOTONLY_PC, R_GOTREL, R_PPC32_PLTREL, R_PPC64_TOCBASE, R_PPC64_RELAX_TOC>(expr)) { - in.got->hasGotOffRel.store(true, std::memory_order_relaxed); + ctx.in.got->hasGotOffRel.store(true, std::memory_order_relaxed); } // Process TLS relocations, including TLS optimizations. Note that @@ -1662,7 +1662,7 @@ template <class ELFT> void elf::scanRelocations() { tg.spawn([] { RelocationScanner scanner; - for (Partition &part : partitions) { + for (Partition &part : ctx.partitions) { for (EhInputSection *sec : part.ehFrame->sections) scanner.template scanSection<ELFT>(*sec, /*isEH=*/true); if (part.armExidx && part.armExidx->isLive()) @@ -1732,15 +1732,16 @@ static bool handleNonPreemptibleIfunc(Symbol &sym, uint16_t flags) { auto *directSym = makeDefined(cast<Defined>(sym)); directSym->allocateAux(); auto &dyn = - config->androidPackDynRelocs ? *in.relaPlt : *ctx.mainPart->relaDyn; - addPltEntry(*in.iplt, *in.igotPlt, dyn, ctx.target->iRelativeRel, *directSym); + config->androidPackDynRelocs ? *ctx.in.relaPlt : *ctx.mainPart->relaDyn; + addPltEntry(*ctx.in.iplt, *ctx.in.igotPlt, dyn, ctx.target->iRelativeRel, + *directSym); sym.allocateAux(); ctx.symAux.back().pltIdx = ctx.symAux[directSym->auxIdx].pltIdx; if (flags & HAS_DIRECT_RELOC) { // Change the value to the IPLT and redirect all references to it. 
auto &d = cast<Defined>(sym); - d.section = in.iplt.get(); + d.section = ctx.in.iplt.get(); d.value = d.getPltIdx() * ctx.target->ipltEntrySize; d.size = 0; // It's important to set the symbol type here so that dynamic loaders @@ -1772,7 +1773,8 @@ void elf::postScanRelocations() { if (flags & NEEDS_GOT) addGotEntry(sym); if (flags & NEEDS_PLT) - addPltEntry(*in.plt, *in.gotPlt, *in.relaPlt, ctx.target->pltRel, sym); + addPltEntry(*ctx.in.plt, *ctx.in.gotPlt, *ctx.in.relaPlt, + ctx.target->pltRel, sym); if (flags & NEEDS_COPY) { if (sym.isObject()) { invokeELFT(addCopyRelSymbol, cast<SharedSymbol>(sym)); @@ -1782,16 +1784,16 @@ void elf::postScanRelocations() { } else { assert(sym.isFunc() && sym.hasFlag(NEEDS_PLT)); if (!sym.isDefined()) { - replaceWithDefined(sym, *in.plt, + replaceWithDefined(sym, *ctx.in.plt, ctx.target->pltHeaderSize + ctx.target->pltEntrySize * sym.getPltIdx(), 0); sym.setFlags(NEEDS_COPY); if (config->emachine == EM_PPC) { // PPC32 canonical PLT entries are at the beginning of .glink - cast<Defined>(sym).value = in.plt->headerSize; - in.plt->headerSize += 16; - cast<PPC32GlinkSection>(*in.plt).canonical_plts.push_back(&sym); + cast<Defined>(sym).value = ctx.in.plt->headerSize; + ctx.in.plt->headerSize += 16; + cast<PPC32GlinkSection>(*ctx.in.plt).canonical_plts.push_back(&sym); } } } @@ -1800,7 +1802,7 @@ void elf::postScanRelocations() { if (!sym.isTls()) return; bool isLocalInExecutable = !sym.isPreemptible && !config->shared; - GotSection *got = in.got.get(); + GotSection *got = ctx.in.got.get(); if (flags & NEEDS_TLSDESC) { got->addTlsDescEntry(sym); @@ -1842,7 +1844,7 @@ void elf::postScanRelocations() { addTpOffsetGotEntry(sym); }; - GotSection *got = in.got.get(); + GotSection *got = ctx.in.got.get(); if (ctx.needsTlsLd.load(std::memory_order_relaxed) && got->addTlsIndex()) { static Undefined dummy(ctx.internalFile, "", STB_LOCAL, 0, 0); if (config->shared) @@ -2379,7 +2381,7 @@ void elf::hexagonTLSSymbolUpdate(ArrayRef<OutputSection *> outputSections) { if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) { if (needEntry) { sym->allocateAux(); - addPltEntry(*in.plt, *in.gotPlt, *in.relaPlt, + addPltEntry(*ctx.in.plt, *ctx.in.gotPlt, *ctx.in.relaPlt, ctx.target->pltRel, *sym); needEntry = false; } diff --git a/lld/ELF/Symbols.cpp b/lld/ELF/Symbols.cpp index b08c679..cd3fdce 100644 --- a/lld/ELF/Symbols.cpp +++ b/lld/ELF/Symbols.cpp @@ -147,8 +147,8 @@ uint64_t Symbol::getVA(int64_t addend) const { uint64_t Symbol::getGotVA() const { if (gotInIgot) - return in.igotPlt->getVA() + getGotPltOffset(); - return in.got->getVA() + getGotOffset(); + return ctx.in.igotPlt->getVA() + getGotPltOffset(); + return ctx.in.got->getVA() + getGotOffset(); } uint64_t Symbol::getGotOffset() const { @@ -157,8 +157,8 @@ uint64_t Symbol::getGotOffset() const { uint64_t Symbol::getGotPltVA() const { if (isInIplt) - return in.igotPlt->getVA() + getGotPltOffset(); - return in.gotPlt->getVA() + getGotPltOffset(); + return ctx.in.igotPlt->getVA() + getGotPltOffset(); + return ctx.in.gotPlt->getVA() + getGotPltOffset(); } uint64_t Symbol::getGotPltOffset() const { @@ -170,8 +170,8 @@ uint64_t Symbol::getGotPltOffset() const { uint64_t Symbol::getPltVA() const { uint64_t outVA = - isInIplt ? in.iplt->getVA() + getPltIdx() * ctx.target->ipltEntrySize - : in.plt->getVA() + in.plt->headerSize + + isInIplt ? 
ctx.in.iplt->getVA() + getPltIdx() * ctx.target->ipltEntrySize + : ctx.in.plt->getVA() + ctx.in.plt->headerSize + getPltIdx() * ctx.target->pltEntrySize; // While linking microMIPS code PLT code are always microMIPS diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp index df82e9e..2239c36 100644 --- a/lld/ELF/SyntheticSections.cpp +++ b/lld/ELF/SyntheticSections.cpp @@ -168,7 +168,7 @@ template <class ELFT> void MipsOptionsSection<ELFT>::writeTo(uint8_t *buf) { options->size = getSize(); if (!config->relocatable) - reginfo.ri_gp_value = in.mipsGot->getGp(); + reginfo.ri_gp_value = ctx.in.mipsGot->getGp(); memcpy(buf + sizeof(Elf_Mips_Options), &reginfo, sizeof(reginfo)); } @@ -225,7 +225,7 @@ MipsReginfoSection<ELFT>::MipsReginfoSection(Elf_Mips_RegInfo reginfo) template <class ELFT> void MipsReginfoSection<ELFT>::writeTo(uint8_t *buf) { if (!config->relocatable) - reginfo.ri_gp_value = in.mipsGot->getGp(); + reginfo.ri_gp_value = ctx.in.mipsGot->getGp(); memcpy(buf, &reginfo, sizeof(reginfo)); } @@ -273,8 +273,8 @@ Defined *elf::addSyntheticLocal(StringRef name, uint8_t type, uint64_t value, uint64_t size, InputSectionBase &section) { Defined *s = makeDefined(section.file, name, STB_LOCAL, STV_DEFAULT, type, value, size, &section); - if (in.symTab) - in.symTab->addSymbol(s); + if (ctx.in.symTab) + ctx.in.symTab->addSymbol(s); if (config->emachine == EM_ARM && !config->isLE && config->armBe8 && (section.flags & SHF_EXECINSTR)) @@ -1295,14 +1295,14 @@ DynamicSection<ELFT>::DynamicSection() // The output section .rela.dyn may include these synthetic sections: // // - part.relaDyn -// - in.relaPlt: this is included if a linker script places .rela.plt inside +// - ctx.in.relaPlt: this is included if a linker script places .rela.plt inside // .rela.dyn // // DT_RELASZ is the total size of the included sections. static uint64_t addRelaSz(const RelocationBaseSection &relaDyn) { size_t size = relaDyn.getSize(); - if (in.relaPlt->getParent() == relaDyn.getParent()) - size += in.relaPlt->getSize(); + if (ctx.in.relaPlt->getParent() == relaDyn.getParent()) + size += ctx.in.relaPlt->getSize(); return size; } @@ -1310,7 +1310,7 @@ static uint64_t addRelaSz(const RelocationBaseSection &relaDyn) { // output section. When this occurs we cannot just use the OutputSection // Size. Moreover the [DT_JMPREL, DT_JMPREL + DT_PLTRELSZ) is permitted to // overlap with the [DT_RELA, DT_RELA + DT_RELASZ). -static uint64_t addPltRelSz() { return in.relaPlt->getSize(); } +static uint64_t addPltRelSz() { return ctx.in.relaPlt->getSize(); } // Add remaining entries to complete .dynamic contents.
template <class ELFT> @@ -1430,36 +1430,36 @@ DynamicSection<ELFT>::computeContents() { addInt(DT_AARCH64_AUTH_RELRSZ, part.relrAuthDyn->getParent()->size); addInt(DT_AARCH64_AUTH_RELRENT, sizeof(Elf_Relr)); } - if (isMain && in.relaPlt->isNeeded()) { - addInSec(DT_JMPREL, *in.relaPlt); + if (isMain && ctx.in.relaPlt->isNeeded()) { + addInSec(DT_JMPREL, *ctx.in.relaPlt); entries.emplace_back(DT_PLTRELSZ, addPltRelSz()); switch (config->emachine) { case EM_MIPS: - addInSec(DT_MIPS_PLTGOT, *in.gotPlt); + addInSec(DT_MIPS_PLTGOT, *ctx.in.gotPlt); break; case EM_S390: - addInSec(DT_PLTGOT, *in.got); + addInSec(DT_PLTGOT, *ctx.in.got); break; case EM_SPARCV9: - addInSec(DT_PLTGOT, *in.plt); + addInSec(DT_PLTGOT, *ctx.in.plt); break; case EM_AARCH64: - if (llvm::find_if(in.relaPlt->relocs, [](const DynamicReloc &r) { + if (llvm::find_if(ctx.in.relaPlt->relocs, [](const DynamicReloc &r) { return r.type == ctx.target->pltRel && r.sym->stOther & STO_AARCH64_VARIANT_PCS; - }) != in.relaPlt->relocs.end()) + }) != ctx.in.relaPlt->relocs.end()) addInt(DT_AARCH64_VARIANT_PCS, 0); - addInSec(DT_PLTGOT, *in.gotPlt); + addInSec(DT_PLTGOT, *ctx.in.gotPlt); break; case EM_RISCV: - if (llvm::any_of(in.relaPlt->relocs, [](const DynamicReloc &r) { + if (llvm::any_of(ctx.in.relaPlt->relocs, [](const DynamicReloc &r) { return r.type == ctx.target->pltRel && (r.sym->stOther & STO_RISCV_VARIANT_CC); })) addInt(DT_RISCV_VARIANT_CC, 0); [[fallthrough]]; default: - addInSec(DT_PLTGOT, *in.gotPlt); + addInSec(DT_PLTGOT, *ctx.in.gotPlt); break; } addInt(DT_PLTREL, config->isRela ? DT_RELA : DT_REL); @@ -1537,33 +1537,34 @@ DynamicSection<ELFT>::computeContents() { addInt(DT_MIPS_FLAGS, RHF_NOTPOT); addInt(DT_MIPS_BASE_ADDRESS, ctx.target->getImageBase()); addInt(DT_MIPS_SYMTABNO, part.dynSymTab->getNumSymbols()); - addInt(DT_MIPS_LOCAL_GOTNO, in.mipsGot->getLocalEntriesNum()); + addInt(DT_MIPS_LOCAL_GOTNO, ctx.in.mipsGot->getLocalEntriesNum()); - if (const Symbol *b = in.mipsGot->getFirstGlobalEntry()) + if (const Symbol *b = ctx.in.mipsGot->getFirstGlobalEntry()) addInt(DT_MIPS_GOTSYM, b->dynsymIndex); else addInt(DT_MIPS_GOTSYM, part.dynSymTab->getNumSymbols()); - addInSec(DT_PLTGOT, *in.mipsGot); - if (in.mipsRldMap) { + addInSec(DT_PLTGOT, *ctx.in.mipsGot); + if (ctx.in.mipsRldMap) { if (!config->pie) - addInSec(DT_MIPS_RLD_MAP, *in.mipsRldMap); + addInSec(DT_MIPS_RLD_MAP, *ctx.in.mipsRldMap); // Store the offset to the .rld_map section // relative to the address of the tag. addInt(DT_MIPS_RLD_MAP_REL, - in.mipsRldMap->getVA() - (getVA() + entries.size() * entsize)); + ctx.in.mipsRldMap->getVA() - (getVA() + entries.size() * entsize)); } } // DT_PPC_GOT indicates to glibc Secure PLT is used. If DT_PPC_GOT is absent, // glibc assumes the old-style BSS PLT layout which we don't support. if (config->emachine == EM_PPC) - addInSec(DT_PPC_GOT, *in.got); + addInSec(DT_PPC_GOT, *ctx.in.got); // Glink dynamic tag is required by the V2 abi if the plt section isn't empty. - if (config->emachine == EM_PPC64 && in.plt->isNeeded()) { + if (config->emachine == EM_PPC64 && ctx.in.plt->isNeeded()) { // The Glink tag points to 32 bytes before the first lazy symbol resolution // stub, which starts directly after the header. 
- addInt(DT_PPC64_GLINK, in.plt->getVA() + ctx.target->pltHeaderSize - 32); + addInt(DT_PPC64_GLINK, + ctx.in.plt->getVA() + ctx.target->pltHeaderSize - 32); } if (config->emachine == EM_PPC64) @@ -1685,9 +1686,9 @@ void RelocationBaseSection::finalizeContents() { else getParent()->link = 0; - if (in.relaPlt.get() == this && in.gotPlt->getParent()) { + if (ctx.in.relaPlt.get() == this && ctx.in.gotPlt->getParent()) { getParent()->flags |= ELF::SHF_INFO_LINK; - getParent()->info = in.gotPlt->getParent()->sectionIndex; + getParent()->info = ctx.in.gotPlt->getParent()->sectionIndex; } } @@ -2331,7 +2332,7 @@ void SymtabShndxSection::writeTo(uint8_t *buf) { // with an entry in .symtab. If the corresponding entry contains SHN_XINDEX, // we need to write actual index, otherwise, we must write SHN_UNDEF(0). buf += 4; // Ignore .symtab[0] entry. - for (const SymbolTableEntry &entry : in.symTab->getSymbols()) { + for (const SymbolTableEntry &entry : ctx.in.symTab->getSymbols()) { if (!getCommonSec(entry.sym) && getSymSectionIndex(entry.sym) == SHN_XINDEX) write32(buf, entry.sym->getOutputSection()->sectionIndex); buf += 4; @@ -2352,11 +2353,11 @@ bool SymtabShndxSection::isNeeded() const { } void SymtabShndxSection::finalizeContents() { - getParent()->link = in.symTab->getParent()->sectionIndex; + getParent()->link = ctx.in.symTab->getParent()->sectionIndex; } size_t SymtabShndxSection::getSize() const { - return in.symTab->getNumSymbols() * 4; + return ctx.in.symTab->getNumSymbols() * 4; } // .hash and .gnu.hash sections contain on-disk hash tables that map @@ -2583,7 +2584,7 @@ size_t PltSection::getSize() const { bool PltSection::isNeeded() const { // For -z retpolineplt, .iplt needs the .plt header. - return !entries.empty() || (config->zRetpolineplt && in.iplt->isNeeded()); + return !entries.empty() || (config->zRetpolineplt && ctx.in.iplt->isNeeded()); } // Used by ARM to add mapping symbols in the PLT section, which aid @@ -2708,15 +2709,15 @@ IBTPltSection::IBTPltSection() : SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 16, ".plt") {} void IBTPltSection::writeTo(uint8_t *buf) { - ctx.target->writeIBTPlt(buf, in.plt->getNumEntries()); + ctx.target->writeIBTPlt(buf, ctx.in.plt->getNumEntries()); } size_t IBTPltSection::getSize() const { // 16 is the header size of .plt. 
- return 16 + in.plt->getNumEntries() * ctx.target->pltEntrySize; + return 16 + ctx.in.plt->getNumEntries() * ctx.target->pltEntrySize; } -bool IBTPltSection::isNeeded() const { return in.plt->getNumEntries() > 0; } +bool IBTPltSection::isNeeded() const { return ctx.in.plt->getNumEntries() > 0; } RelroPaddingSection::RelroPaddingSection() : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_NOBITS, 1, ".relro_padding") { @@ -4438,26 +4439,26 @@ PartitionIndexSection::PartitionIndexSection() : SyntheticSection(SHF_ALLOC, SHT_PROGBITS, 4, ".rodata") {} size_t PartitionIndexSection::getSize() const { - return 12 * (partitions.size() - 1); + return 12 * (ctx.partitions.size() - 1); } void PartitionIndexSection::finalizeContents() { - for (size_t i = 1; i != partitions.size(); ++i) - partitions[i].nameStrTab = - ctx.mainPart->dynStrTab->addString(partitions[i].name); + for (size_t i = 1; i != ctx.partitions.size(); ++i) + ctx.partitions[i].nameStrTab = + ctx.mainPart->dynStrTab->addString(ctx.partitions[i].name); } void PartitionIndexSection::writeTo(uint8_t *buf) { uint64_t va = getVA(); - for (size_t i = 1; i != partitions.size(); ++i) { - write32(buf, - ctx.mainPart->dynStrTab->getVA() + partitions[i].nameStrTab - va); - write32(buf + 4, partitions[i].elfHeader->getVA() - (va + 4)); + for (size_t i = 1; i != ctx.partitions.size(); ++i) { + write32(buf, ctx.mainPart->dynStrTab->getVA() + + ctx.partitions[i].nameStrTab - va); + write32(buf + 4, ctx.partitions[i].elfHeader->getVA() - (va + 4)); - SyntheticSection *next = i == partitions.size() - 1 - ? in.partEnd.get() - : partitions[i + 1].elfHeader.get(); - write32(buf + 8, next->getVA() - partitions[i].elfHeader->getVA()); + SyntheticSection *next = i == ctx.partitions.size() - 1 + ? ctx.in.partEnd.get() + : ctx.partitions[i + 1].elfHeader.get(); + write32(buf + 8, next->getVA() - ctx.partitions[i].elfHeader->getVA()); va += 12; buf += 12; @@ -4657,7 +4658,7 @@ template <class ELFT> void elf::createSyntheticSections() { // The removeUnusedSyntheticSections() function relies on the // SyntheticSections coming last. if (needsInterpSection()) { - for (size_t i = 1; i <= partitions.size(); ++i) { + for (size_t i = 1; i <= ctx.partitions.size(); ++i) { InputSection *sec = createInterpSection(); sec->partition = i; ctx.inputSections.push_back(sec); @@ -4667,47 +4668,47 @@ template <class ELFT> void elf::createSyntheticSections() { auto add = [](SyntheticSection &sec) { ctx.inputSections.push_back(&sec); }; if (config->zSectionHeader) - in.shStrTab = std::make_unique<StringTableSection>(".shstrtab", false); + ctx.in.shStrTab = std::make_unique<StringTableSection>(".shstrtab", false); ctx.out.programHeaders = make<OutputSection>("", 0, SHF_ALLOC); ctx.out.programHeaders->addralign = config->wordsize; if (config->strip != StripPolicy::All) { - in.strTab = std::make_unique<StringTableSection>(".strtab", false); - in.symTab = std::make_unique<SymbolTableSection<ELFT>>(*in.strTab); - in.symTabShndx = std::make_unique<SymtabShndxSection>(); + ctx.in.strTab = std::make_unique<StringTableSection>(".strtab", false); + ctx.in.symTab = std::make_unique<SymbolTableSection<ELFT>>(*ctx.in.strTab); + ctx.in.symTabShndx = std::make_unique<SymtabShndxSection>(); } - in.bss = std::make_unique<BssSection>(".bss", 0, 1); - add(*in.bss); + ctx.in.bss = std::make_unique<BssSection>(".bss", 0, 1); + add(*ctx.in.bss); // If there is a SECTIONS command and a .data.rel.ro section name use name // .data.rel.ro.bss so that we match in the .data.rel.ro output section. 
// This makes sure our relro is contiguous. bool hasDataRelRo = ctx.script->hasSectionsCommand && findSection(".data.rel.ro"); - in.bssRelRo = std::make_unique<BssSection>( + ctx.in.bssRelRo = std::make_unique<BssSection>( hasDataRelRo ? ".data.rel.ro.bss" : ".bss.rel.ro", 0, 1); - add(*in.bssRelRo); + add(*ctx.in.bssRelRo); // Add MIPS-specific sections. if (config->emachine == EM_MIPS) { if (!config->shared && config->hasDynSymTab) { - in.mipsRldMap = std::make_unique<MipsRldMapSection>(); - add(*in.mipsRldMap); + ctx.in.mipsRldMap = std::make_unique<MipsRldMapSection>(); + add(*ctx.in.mipsRldMap); } - if ((in.mipsAbiFlags = MipsAbiFlagsSection<ELFT>::create())) - add(*in.mipsAbiFlags); - if ((in.mipsOptions = MipsOptionsSection<ELFT>::create())) - add(*in.mipsOptions); - if ((in.mipsReginfo = MipsReginfoSection<ELFT>::create())) - add(*in.mipsReginfo); + if ((ctx.in.mipsAbiFlags = MipsAbiFlagsSection<ELFT>::create())) + add(*ctx.in.mipsAbiFlags); + if ((ctx.in.mipsOptions = MipsOptionsSection<ELFT>::create())) + add(*ctx.in.mipsOptions); + if ((ctx.in.mipsReginfo = MipsReginfoSection<ELFT>::create())) + add(*ctx.in.mipsReginfo); } StringRef relaDynName = config->isRela ? ".rela.dyn" : ".rel.dyn"; const unsigned threadCount = config->threadCount; - for (Partition &part : partitions) { + for (Partition &part : ctx.partitions) { auto add = [&](SyntheticSection &sec) { sec.partition = part.getNumber(); ctx.inputSections.push_back(&sec); @@ -4811,101 +4812,102 @@ template <class ELFT> void elf::createSyntheticSections() { } } - if (partitions.size() != 1) { + if (ctx.partitions.size() != 1) { // Create the partition end marker. This needs to be in partition number 255 // so that it is sorted after all other partitions. It also has other // special handling (see createPhdrs() and combineEhSections()). - in.partEnd = + ctx.in.partEnd = std::make_unique<BssSection>(".part.end", config->maxPageSize, 1); - in.partEnd->partition = 255; - add(*in.partEnd); + ctx.in.partEnd->partition = 255; + add(*ctx.in.partEnd); - in.partIndex = std::make_unique<PartitionIndexSection>(); - addOptionalRegular("__part_index_begin", in.partIndex.get(), 0); - addOptionalRegular("__part_index_end", in.partIndex.get(), - in.partIndex->getSize()); - add(*in.partIndex); + ctx.in.partIndex = std::make_unique<PartitionIndexSection>(); + addOptionalRegular("__part_index_begin", ctx.in.partIndex.get(), 0); + addOptionalRegular("__part_index_end", ctx.in.partIndex.get(), + ctx.in.partIndex->getSize()); + add(*ctx.in.partIndex); } // Add .got. MIPS' .got is so different from the other archs, // it has its own class. 
if (config->emachine == EM_MIPS) { - in.mipsGot = std::make_unique<MipsGotSection>(); - add(*in.mipsGot); + ctx.in.mipsGot = std::make_unique<MipsGotSection>(); + add(*ctx.in.mipsGot); } else { - in.got = std::make_unique<GotSection>(); - add(*in.got); + ctx.in.got = std::make_unique<GotSection>(); + add(*ctx.in.got); } if (config->emachine == EM_PPC) { - in.ppc32Got2 = std::make_unique<PPC32Got2Section>(); - add(*in.ppc32Got2); + ctx.in.ppc32Got2 = std::make_unique<PPC32Got2Section>(); + add(*ctx.in.ppc32Got2); } if (config->emachine == EM_PPC64) { - in.ppc64LongBranchTarget = std::make_unique<PPC64LongBranchTargetSection>(); - add(*in.ppc64LongBranchTarget); + ctx.in.ppc64LongBranchTarget = + std::make_unique<PPC64LongBranchTargetSection>(); + add(*ctx.in.ppc64LongBranchTarget); } - in.gotPlt = std::make_unique<GotPltSection>(); - add(*in.gotPlt); - in.igotPlt = std::make_unique<IgotPltSection>(); - add(*in.igotPlt); + ctx.in.gotPlt = std::make_unique<GotPltSection>(); + add(*ctx.in.gotPlt); + ctx.in.igotPlt = std::make_unique<IgotPltSection>(); + add(*ctx.in.igotPlt); // Add .relro_padding if DATA_SEGMENT_RELRO_END is used; otherwise, add the // section in the absence of PHDRS/SECTIONS commands. if (config->zRelro && ((ctx.script->phdrsCommands.empty() && !ctx.script->hasSectionsCommand) || ctx.script->seenRelroEnd)) { - in.relroPadding = std::make_unique<RelroPaddingSection>(); - add(*in.relroPadding); + ctx.in.relroPadding = std::make_unique<RelroPaddingSection>(); + add(*ctx.in.relroPadding); } if (config->emachine == EM_ARM) { - in.armCmseSGSection = std::make_unique<ArmCmseSGSection>(); - add(*in.armCmseSGSection); + ctx.in.armCmseSGSection = std::make_unique<ArmCmseSGSection>(); + add(*ctx.in.armCmseSGSection); } // _GLOBAL_OFFSET_TABLE_ is defined relative to either .got.plt or .got. Treat // it as a relocation and ensure the referenced section is created. if (ctx.sym.globalOffsetTable && config->emachine != EM_MIPS) { if (ctx.target->gotBaseSymInGotPlt) - in.gotPlt->hasGotPltOffRel = true; + ctx.in.gotPlt->hasGotPltOffRel = true; else - in.got->hasGotOffRel = true; + ctx.in.got->hasGotOffRel = true; } // We always need to add rel[a].plt to output if it has entries. // Even for static linking it can contain R_[*]_IRELATIVE relocations. - in.relaPlt = std::make_unique<RelocationSection<ELFT>>( + ctx.in.relaPlt = std::make_unique<RelocationSection<ELFT>>( config->isRela ? 
".rela.plt" : ".rel.plt", /*sort=*/false, /*threadCount=*/1); - add(*in.relaPlt); + add(*ctx.in.relaPlt); if ((config->emachine == EM_386 || config->emachine == EM_X86_64) && (config->andFeatures & GNU_PROPERTY_X86_FEATURE_1_IBT)) { - in.ibtPlt = std::make_unique<IBTPltSection>(); - add(*in.ibtPlt); + ctx.in.ibtPlt = std::make_unique<IBTPltSection>(); + add(*ctx.in.ibtPlt); } if (config->emachine == EM_PPC) - in.plt = std::make_unique<PPC32GlinkSection>(); + ctx.in.plt = std::make_unique<PPC32GlinkSection>(); else - in.plt = std::make_unique<PltSection>(); - add(*in.plt); - in.iplt = std::make_unique<IpltSection>(); - add(*in.iplt); + ctx.in.plt = std::make_unique<PltSection>(); + add(*ctx.in.plt); + ctx.in.iplt = std::make_unique<IpltSection>(); + add(*ctx.in.iplt); if (config->andFeatures || !ctx.aarch64PauthAbiCoreInfo.empty()) add(*make<GnuPropertySection>()); if (config->debugNames) { - in.debugNames = std::make_unique<DebugNamesSection<ELFT>>(); - add(*in.debugNames); + ctx.in.debugNames = std::make_unique<DebugNamesSection<ELFT>>(); + add(*ctx.in.debugNames); } if (config->gdbIndex) { - in.gdbIndex = GdbIndexSection::create<ELFT>(); - add(*in.gdbIndex); + ctx.in.gdbIndex = GdbIndexSection::create<ELFT>(); + add(*ctx.in.gdbIndex); } // .note.GNU-stack is always added when we are creating a re-linkable @@ -4916,20 +4918,16 @@ template <class ELFT> void elf::createSyntheticSections() { if (config->relocatable) add(*make<GnuStackSection>()); - if (in.symTab) - add(*in.symTab); - if (in.symTabShndx) - add(*in.symTabShndx); - if (in.shStrTab) - add(*in.shStrTab); - if (in.strTab) - add(*in.strTab); + if (ctx.in.symTab) + add(*ctx.in.symTab); + if (ctx.in.symTabShndx) + add(*ctx.in.symTabShndx); + if (ctx.in.shStrTab) + add(*ctx.in.shStrTab); + if (ctx.in.strTab) + add(*ctx.in.strTab); } -InStruct elf::in; - -std::vector<Partition> elf::partitions; - template void elf::splitSections<ELF32LE>(); template void elf::splitSections<ELF32BE>(); template void elf::splitSections<ELF64LE>(); diff --git a/lld/ELF/SyntheticSections.h b/lld/ELF/SyntheticSections.h index 56647f4..8e2664d 100644 --- a/lld/ELF/SyntheticSections.h +++ b/lld/ELF/SyntheticSections.h @@ -1471,52 +1471,14 @@ struct Partition { std::unique_ptr<SyntheticSection> verNeed; std::unique_ptr<VersionTableSection> verSym; - unsigned getNumber() const { return this - &partitions[0] + 1; } + unsigned getNumber() const { return this - &ctx.partitions[0] + 1; } }; inline Partition &SectionBase::getPartition() const { assert(isLive()); - return partitions[partition - 1]; + return ctx.partitions[partition - 1]; } -// Linker generated sections which can be used as inputs and are not specific to -// a partition. 
-struct InStruct { - std::unique_ptr<InputSection> attributes; - std::unique_ptr<SyntheticSection> riscvAttributes; - std::unique_ptr<BssSection> bss; - std::unique_ptr<BssSection> bssRelRo; - std::unique_ptr<GotSection> got; - std::unique_ptr<GotPltSection> gotPlt; - std::unique_ptr<IgotPltSection> igotPlt; - std::unique_ptr<RelroPaddingSection> relroPadding; - std::unique_ptr<SyntheticSection> armCmseSGSection; - std::unique_ptr<PPC64LongBranchTargetSection> ppc64LongBranchTarget; - std::unique_ptr<SyntheticSection> mipsAbiFlags; - std::unique_ptr<MipsGotSection> mipsGot; - std::unique_ptr<SyntheticSection> mipsOptions; - std::unique_ptr<SyntheticSection> mipsReginfo; - std::unique_ptr<MipsRldMapSection> mipsRldMap; - std::unique_ptr<SyntheticSection> partEnd; - std::unique_ptr<SyntheticSection> partIndex; - std::unique_ptr<PltSection> plt; - std::unique_ptr<IpltSection> iplt; - std::unique_ptr<PPC32Got2Section> ppc32Got2; - std::unique_ptr<IBTPltSection> ibtPlt; - std::unique_ptr<RelocationBaseSection> relaPlt; - // Non-SHF_ALLOC sections - std::unique_ptr<SyntheticSection> debugNames; - std::unique_ptr<GdbIndexSection> gdbIndex; - std::unique_ptr<StringTableSection> shStrTab; - std::unique_ptr<StringTableSection> strTab; - std::unique_ptr<SymbolTableBaseSection> symTab; - std::unique_ptr<SymtabShndxSection> symTabShndx; - - void reset(); -}; - -LLVM_LIBRARY_VISIBILITY extern InStruct in; - } // namespace lld::elf #endif diff --git a/lld/ELF/Thunks.cpp b/lld/ELF/Thunks.cpp index fe83c08..349e313 100644 --- a/lld/ELF/Thunks.cpp +++ b/lld/ELF/Thunks.cpp @@ -473,9 +473,9 @@ public: : PPC64LongBranchThunk(dest, addend) { assert(!dest.isPreemptible); if (std::optional<uint32_t> index = - in.ppc64LongBranchTarget->addEntry(&dest, addend)) { + ctx.in.ppc64LongBranchTarget->addEntry(&dest, addend)) { ctx.mainPart->relaDyn->addRelativeReloc( - ctx.target->relativeRel, *in.ppc64LongBranchTarget, + ctx.target->relativeRel, *ctx.in.ppc64LongBranchTarget, *index * UINT64_C(8), dest, addend + getPPC64GlobalEntryToLocalEntryOffset(dest.stOther), ctx.target->symbolicRel, R_ABS); @@ -487,7 +487,7 @@ class PPC64PDLongBranchThunk final : public PPC64LongBranchThunk { public: PPC64PDLongBranchThunk(Symbol &dest, int64_t addend) : PPC64LongBranchThunk(dest, addend) { - in.ppc64LongBranchTarget->addEntry(&dest, addend); + ctx.in.ppc64LongBranchTarget->addEntry(&dest, addend); } }; @@ -1052,12 +1052,12 @@ void elf::writePPC32PltCallStub(uint8_t *buf, uint64_t gotPltVA, // almost always 0x8000. The address of .got2 is different in another object // file, so a stub cannot be shared. offset = gotPltVA - - (in.ppc32Got2->getParent()->getVA() + + (ctx.in.ppc32Got2->getParent()->getVA() + (file->ppc32Got2 ? file->ppc32Got2->outSecOff : 0) + addend); } else { // The stub loads an address relative to _GLOBAL_OFFSET_TABLE_ (which is // currently the address of .got). 
- offset = gotPltVA - in.got->getVA(); + offset = gotPltVA - ctx.in.got->getVA(); } uint16_t ha = (offset + 0x8000) >> 16, l = (uint16_t)offset; if (ha == 0) { @@ -1088,7 +1088,7 @@ void PPC32PltCallStub::addSymbols(ThunkSection &isec) { else os << ".plt_pic32."; os << destination.getName(); - addSymbol(saver().save(os.str()), STT_FUNC, 0, isec); + addSymbol(saver().save(buf), STT_FUNC, 0, isec); } bool PPC32PltCallStub::isCompatibleWith(const InputSection &isec, @@ -1176,9 +1176,9 @@ void PPC64R2SaveStub::writeTo(uint8_t *buf) { write32(buf + nextInstOffset, MTCTR_R12); // mtctr r12 write32(buf + nextInstOffset + 4, BCTR); // bctr } else { - in.ppc64LongBranchTarget->addEntry(&destination, addend); + ctx.in.ppc64LongBranchTarget->addEntry(&destination, addend); const int64_t offsetFromTOC = - in.ppc64LongBranchTarget->getEntryVA(&destination, addend) - + ctx.in.ppc64LongBranchTarget->getEntryVA(&destination, addend) - getPPC64TocBase(); writePPC64LoadAndBranch(buf + 4, offsetFromTOC); } @@ -1238,8 +1238,9 @@ bool PPC64R12SetupStub::isCompatibleWith(const InputSection &isec, } void PPC64LongBranchThunk::writeTo(uint8_t *buf) { - int64_t offset = in.ppc64LongBranchTarget->getEntryVA(&destination, addend) - - getPPC64TocBase(); + int64_t offset = + ctx.in.ppc64LongBranchTarget->getEntryVA(&destination, addend) - + getPPC64TocBase(); writePPC64LoadAndBranch(buf, offset); } diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp index 0165253..8999fdc 100644 --- a/lld/ELF/Writer.cpp +++ b/lld/ELF/Writer.cpp @@ -118,7 +118,7 @@ static void removeEmptyPTLoad(SmallVector<PhdrEntry *, 0> &phdrs) { void elf::copySectionsIntoPartitions() { SmallVector<InputSectionBase *, 0> newSections; const size_t ehSize = ctx.ehInputSections.size(); - for (unsigned part = 2; part != partitions.size() + 1; ++part) { + for (unsigned part = 2; part != ctx.partitions.size() + 1; ++part) { for (InputSectionBase *s : ctx.inputSections) { if (!(s->flags & SHF_ALLOC) || !s->isLive() || s->type != SHT_NOTE) continue; @@ -320,7 +320,7 @@ template <class ELFT> void Writer<ELFT>::run() { // Remove empty PT_LOAD to avoid causing the dynamic linker to try to mmap a // 0 sized region. This has to be done late since only after assignAddresses // we know the size of the sections. - for (Partition &part : partitions) + for (Partition &part : ctx.partitions) removeEmptyPTLoad(part.phdrs); if (!config->oFormatBinary) @@ -328,7 +328,7 @@ template <class ELFT> void Writer<ELFT>::run() { else assignFileOffsetsBinary(); - for (Partition &part : partitions) + for (Partition &part : ctx.partitions) setPhdrs(part); // Handle --print-map(-M)/--Map and --cref. Dump them before checkSections() @@ -485,8 +485,8 @@ static void demoteAndCopyLocalSymbols() { if (dr->section && !dr->section->isLive()) demoteDefined(*dr, sectionIndexMap); - else if (in.symTab && includeInSymtab(*b) && shouldKeepInSymtab(*dr)) - in.symTab->addSymbol(b); + else if (ctx.in.symTab && includeInSymtab(*b) && shouldKeepInSymtab(*dr)) + ctx.in.symTab->addSymbol(b); } } } @@ -529,9 +529,9 @@ template <class ELFT> void Writer<ELFT>::addSectionSymbols() { // Set the symbol to be relative to the output section so that its st_value // equals the output section address. Note, there may be a gap between the // start of the output section and isec. 
- in.symTab->addSymbol(makeDefined(isec->file, "", STB_LOCAL, /*stOther=*/0, - STT_SECTION, - /*value=*/0, /*size=*/0, &osec)); + ctx.in.symTab->addSymbol(makeDefined(isec->file, "", STB_LOCAL, + /*stOther=*/0, STT_SECTION, + /*value=*/0, /*size=*/0, &osec)); } } @@ -578,7 +578,7 @@ static bool isRelroSection(const OutputSection *sec) { // .got contains pointers to external symbols. They are resolved by // the dynamic linker when a module is loaded into memory, and after // that they are not expected to change. So, it can be in RELRO. - if (in.got && sec == in.got->getParent()) + if (ctx.in.got && sec == ctx.in.got->getParent()) return true; // .toc is a GOT-ish section for PowerPC64. Their contents are accessed @@ -593,10 +593,10 @@ static bool isRelroSection(const OutputSection *sec) { // by default resolved lazily, so we usually cannot put it into RELRO. // However, if "-z now" is given, the lazy symbol resolution is // disabled, which enables us to put it into RELRO. - if (sec == in.gotPlt->getParent()) + if (sec == ctx.in.gotPlt->getParent()) return config->zNow; - if (in.relroPadding && sec == in.relroPadding->getParent()) + if (ctx.in.relroPadding && sec == ctx.in.relroPadding->getParent()) return true; // .dynamic section contains data for the dynamic linker, and @@ -825,10 +825,10 @@ template <class ELFT> void Writer<ELFT>::setReservedSymbolSections() { if (ctx.sym.globalOffsetTable) { // The _GLOBAL_OFFSET_TABLE_ symbol is defined by target convention usually // to the start of the .got or .got.plt section. - InputSection *sec = in.gotPlt.get(); + InputSection *sec = ctx.in.gotPlt.get(); if (!ctx.target->gotBaseSymInGotPlt) - sec = in.mipsGot ? cast<InputSection>(in.mipsGot.get()) - : cast<InputSection>(in.got.get()); + sec = ctx.in.mipsGot ? cast<InputSection>(ctx.in.mipsGot.get()) + : cast<InputSection>(ctx.in.got.get()); ctx.sym.globalOffsetTable->section = sec; } @@ -844,7 +844,7 @@ template <class ELFT> void Writer<ELFT>::setReservedSymbolSections() { auto isLarge = [](OutputSection *osec) { return config->emachine == EM_X86_64 && osec->flags & SHF_X86_64_LARGE; }; - for (Partition &part : partitions) { + for (Partition &part : ctx.partitions) { for (PhdrEntry *p : part.phdrs) { if (p->p_type != PT_LOAD) continue; @@ -953,7 +953,7 @@ findOrphanPos(SmallVectorImpl<SectionCommand *>::iterator b, // As a special case, place .relro_padding before the SymbolAssignment using // DATA_SEGMENT_RELRO_END, if present. - if (in.relroPadding && sec == in.relroPadding->getParent()) { + if (ctx.in.relroPadding && sec == ctx.in.relroPadding->getParent()) { auto i = std::find_if(b, e, [=](SectionCommand *a) { if (auto *assign = dyn_cast<SymbolAssignment>(a)) return assign->dataSegmentRelroEnd; @@ -1443,7 +1443,7 @@ template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() { // increasing. Anything here must be repeatable, since spilling may change // section order. 
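The isRelroSection() hunks above encode a rule that is easy to miss in the rename noise: .got can always be mapped read-only after dynamic relocation, but .got.plt qualifies only under -z now, because lazy PLT binding writes resolved addresses into .got.plt at run time. A condensed model of that decision (SecKind and LinkOpts are illustrative stand-ins, not lld's types):

struct LinkOpts { bool zNow; };

enum class SecKind { Got, GotPlt, RelroPadding, Other };

bool isRelro(SecKind k, const LinkOpts &opts) {
  switch (k) {
  case SecKind::Got:          // patched by the loader once, then stable
  case SecKind::RelroPadding:
    return true;
  case SecKind::GotPlt:       // lazy binding keeps writing here...
    return opts.zNow;         // ...unless -z now forces eager resolution
  case SecKind::Other:
    return false;
  }
  return false;
}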
const auto finalizeOrderDependentContent = [this] { - for (Partition &part : partitions) + for (Partition &part : ctx.partitions) finalizeSynthetic(part.armExidx.get()); resolveShfLinkOrder(); }; @@ -1481,11 +1481,11 @@ template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() { changed |= a32p.createFixes(); } - finalizeSynthetic(in.got.get()); - if (in.mipsGot) - in.mipsGot->updateAllocSize(); + finalizeSynthetic(ctx.in.got.get()); + if (ctx.in.mipsGot) + ctx.in.mipsGot->updateAllocSize(); - for (Partition &part : partitions) { + for (Partition &part : ctx.partitions) { // The R_AARCH64_AUTH_RELATIVE has a smaller addend field as bits [63:32] // encode the signing schema. We've put relocations in .relr.auth.dyn // during RelocationScanner::processAux, but the target VA for some of @@ -1777,7 +1777,7 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() { // earlier. { llvm::TimeTraceScope timeScope("Finalize .eh_frame"); - for (Partition &part : partitions) + for (Partition &part : ctx.partitions) finalizeSynthetic(part.ehFrame.get()); } } @@ -1805,10 +1805,10 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() { reportUndefinedSymbols(); postScanRelocations(); - if (in.plt && in.plt->isNeeded()) - in.plt->addSymbols(); - if (in.iplt && in.iplt->isNeeded()) - in.iplt->addSymbols(); + if (ctx.in.plt && ctx.in.plt->isNeeded()) + ctx.in.plt->addSymbols(); + if (ctx.in.iplt && ctx.in.iplt->isNeeded()) + ctx.in.iplt->addSymbols(); if (config->unresolvedSymbolsInShlib != UnresolvedPolicy::Ignore) { auto diagnose = @@ -1861,11 +1861,11 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() { continue; if (!config->relocatable) sym->binding = sym->computeBinding(); - if (in.symTab) - in.symTab->addSymbol(sym); + if (ctx.in.symTab) + ctx.in.symTab->addSymbol(sym); if (sym->includeInDynsym()) { - partitions[sym->partition - 1].dynSymTab->addSymbol(sym); + ctx.partitions[sym->partition - 1].dynSymTab->addSymbol(sym); if (auto *file = dyn_cast_or_null<SharedFile>(sym->file)) if (file->isNeeded && !sym->isUndefined()) addVerneed(sym); @@ -1874,7 +1874,8 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() { // We also need to scan the dynamic relocation tables of the other // partitions and add any referenced symbols to the partition's dynsym. - for (Partition &part : MutableArrayRef<Partition>(partitions).slice(1)) { + for (Partition &part : + MutableArrayRef<Partition>(ctx.partitions).slice(1)) { DenseSet<Symbol *> syms; for (const SymbolTableEntry &e : part.dynSymTab->getSymbols()) syms.insert(e.sym); @@ -1885,8 +1886,8 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() { } } - if (in.mipsGot) - in.mipsGot->build(); + if (ctx.in.mipsGot) + ctx.in.mipsGot->build(); removeUnusedSyntheticSections(); ctx.script->diagnoseOrphanHandling(); @@ -1895,16 +1896,17 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() { sortSections(); // Create a list of OutputSections, assign sectionIndex, and populate - // in.shStrTab. If -z nosectionheader is specified, drop non-ALLOC sections. + // ctx.in.shStrTab. If -z nosectionheader is specified, drop non-ALLOC + // sections. 
for (SectionCommand *cmd : ctx.script->sectionCommands) if (auto *osd = dyn_cast<OutputDesc>(cmd)) { OutputSection *osec = &osd->osec; - if (!in.shStrTab && !(osec->flags & SHF_ALLOC)) + if (!ctx.in.shStrTab && !(osec->flags & SHF_ALLOC)) continue; ctx.outputSections.push_back(osec); osec->sectionIndex = ctx.outputSections.size(); - if (in.shStrTab) - osec->shName = in.shStrTab->addString(osec->name); + if (ctx.in.shStrTab) + osec->shName = ctx.in.shStrTab->addString(osec->name); } // Prefer command line supplied address over other constraints. @@ -1922,7 +1924,7 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() { symtab.addSymbol(Undefined{ctx.internalFile, "__tls_get_addr", STB_GLOBAL, STV_DEFAULT, STT_NOTYPE}); sym->isPreemptible = true; - partitions[0].dynSymTab->addSymbol(sym); + ctx.partitions[0].dynSymTab->addSymbol(sym); } // This is a bit of a hack. A value of 0 means undef, so we set it @@ -1935,7 +1937,7 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() { // The headers have to be created before finalize as that can influence the // image base and the dynamic section on mips includes the image base. if (!config->relocatable && !config->oFormatBinary) { - for (Partition &part : partitions) { + for (Partition &part : ctx.partitions) { part.phdrs = ctx.script->hasPhdrsCommands() ? ctx.script->createPhdrs() : createPhdrs(part); if (config->emachine == EM_ARM) { @@ -1976,24 +1978,24 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() { { llvm::TimeTraceScope timeScope("Finalize synthetic sections"); - finalizeSynthetic(in.bss.get()); - finalizeSynthetic(in.bssRelRo.get()); - finalizeSynthetic(in.symTabShndx.get()); - finalizeSynthetic(in.shStrTab.get()); - finalizeSynthetic(in.strTab.get()); - finalizeSynthetic(in.got.get()); - finalizeSynthetic(in.mipsGot.get()); - finalizeSynthetic(in.igotPlt.get()); - finalizeSynthetic(in.gotPlt.get()); - finalizeSynthetic(in.relaPlt.get()); - finalizeSynthetic(in.plt.get()); - finalizeSynthetic(in.iplt.get()); - finalizeSynthetic(in.ppc32Got2.get()); - finalizeSynthetic(in.partIndex.get()); + finalizeSynthetic(ctx.in.bss.get()); + finalizeSynthetic(ctx.in.bssRelRo.get()); + finalizeSynthetic(ctx.in.symTabShndx.get()); + finalizeSynthetic(ctx.in.shStrTab.get()); + finalizeSynthetic(ctx.in.strTab.get()); + finalizeSynthetic(ctx.in.got.get()); + finalizeSynthetic(ctx.in.mipsGot.get()); + finalizeSynthetic(ctx.in.igotPlt.get()); + finalizeSynthetic(ctx.in.gotPlt.get()); + finalizeSynthetic(ctx.in.relaPlt.get()); + finalizeSynthetic(ctx.in.plt.get()); + finalizeSynthetic(ctx.in.iplt.get()); + finalizeSynthetic(ctx.in.ppc32Got2.get()); + finalizeSynthetic(ctx.in.partIndex.get()); // Dynamic section must be the last one in this list and dynamic // symbol table section (dynSymTab) must be the first one. - for (Partition &part : partitions) { + for (Partition &part : ctx.partitions) { if (part.relaDyn) { part.relaDyn->mergeRels(); // Compute DT_RELACOUNT to be used by part.dynamic. @@ -2054,14 +2056,14 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() { llvm::TimeTraceScope timeScope("Finalize synthetic sections"); // finalizeAddressDependentContent may have added local symbols to the // static symbol table. 
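The loop above (and the matching writeHeader() change further down) shows how `-z nosectionheader` is modeled: ctx.in.shStrTab is simply never created, and its absence makes the writer drop non-ALLOC output sections and skip the section header table. A small sketch of that gating, using stand-in types (OutSec, ShStrTab) rather than lld's real classes:

#include <cstdint>
#include <vector>

constexpr uint64_t SHF_ALLOC = 0x2;

struct OutSec {
  uint64_t flags = 0;
  unsigned shName = 0; // offset of the name in .shstrtab
};

struct ShStrTab {
  unsigned next = 1;
  unsigned addString(const char *) { return next++; }
};

// A null shStrTab models -z nosectionheader.
void pickOutputSections(std::vector<OutSec> &cands, ShStrTab *shStrTab,
                        std::vector<OutSec *> &out) {
  for (OutSec &osec : cands) {
    if (!shStrTab && !(osec.flags & SHF_ALLOC))
      continue; // no header table means nothing can name non-ALLOC sections
    out.push_back(&osec);
    if (shStrTab)
      osec.shName = shStrTab->addString("name");
  }
}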
- finalizeSynthetic(in.symTab.get()); - finalizeSynthetic(in.debugNames.get()); - finalizeSynthetic(in.ppc64LongBranchTarget.get()); - finalizeSynthetic(in.armCmseSGSection.get()); + finalizeSynthetic(ctx.in.symTab.get()); + finalizeSynthetic(ctx.in.debugNames.get()); + finalizeSynthetic(ctx.in.ppc64LongBranchTarget.get()); + finalizeSynthetic(ctx.in.armCmseSGSection.get()); } // Relaxation to delete inter-basic block jumps created by basic block - // sections. Run after in.symTab is finalized as optimizeBasicBlockJumps + // sections. Run after ctx.in.symTab is finalized as optimizeBasicBlockJumps // can relax jump instructions based on symbol offset. if (config->optimizeBBJumps) optimizeBasicBlockJumps(); @@ -2437,7 +2439,7 @@ template <class ELFT> void Writer<ELFT>::fixSectionAlignments() { } }; - for (Partition &part : partitions) { + for (Partition &part : ctx.partitions) { prev = nullptr; for (const PhdrEntry *p : part.phdrs) if (p->p_type == PT_LOAD && p->firstSec) { @@ -2503,7 +2505,7 @@ template <class ELFT> void Writer<ELFT>::assignFileOffsets() { uint64_t off = ctx.out.elfHeader->size + ctx.out.programHeaders->size; PhdrEntry *lastRX = nullptr; - for (Partition &part : partitions) + for (Partition &part : ctx.partitions) for (PhdrEntry *p : part.phdrs) if (p->p_type == PT_LOAD && (p->p_flags & PF_X)) lastRX = p; @@ -2730,7 +2732,7 @@ template <class ELFT> void Writer<ELFT>::writeHeader() { eHdr->e_entry = getEntryAddr(); // If -z nosectionheader is specified, omit the section header table. - if (!in.shStrTab) + if (!ctx.in.shStrTab) return; eHdr->e_shoff = sectionHeaderOff; @@ -2750,7 +2752,7 @@ template <class ELFT> void Writer<ELFT>::writeHeader() { else eHdr->e_shnum = num; - uint32_t strTabIndex = in.shStrTab->getParent()->sectionIndex; + uint32_t strTabIndex = ctx.in.shStrTab->getParent()->sectionIndex; if (strTabIndex >= SHN_LORESERVE) { sHdrs->sh_link = strTabIndex; eHdr->e_shstrndx = SHN_XINDEX; @@ -2772,7 +2774,7 @@ template <class ELFT> void Writer<ELFT>::openFile() { << "section sizes:\n"; for (OutputSection *os : ctx.outputSections) s << os->name << ' ' << os->size << "\n"; - error(s.str()); + error(msg); return; } @@ -2813,7 +2815,7 @@ static void fillTrap(uint8_t *i, uint8_t *end) { // We'll leave other pages in segments as-is because the rest will be // overwritten by output sections. template <class ELFT> void Writer<ELFT>::writeTrapInstr() { - for (Partition &part : partitions) { + for (Partition &part : ctx.partitions) { // Fill the last page. 
for (PhdrEntry *p : part.phdrs) if (p->p_type == PT_LOAD && (p->p_flags & PF_X)) @@ -2890,7 +2892,7 @@ template <class ELFT> void Writer<ELFT>::writeBuildId() { return; if (config->buildId == BuildIdKind::Hexstring) { - for (Partition &part : partitions) + for (Partition &part : ctx.partitions) part.buildId->writeBuildId(config->buildIdVector); return; } @@ -2930,7 +2932,7 @@ template <class ELFT> void Writer<ELFT>::writeBuildId() { default: llvm_unreachable("unknown BuildIdKind"); } - for (Partition &part : partitions) + for (Partition &part : ctx.partitions) part.buildId->writeBuildId(output); } diff --git a/lld/MachO/CMakeLists.txt b/lld/MachO/CMakeLists.txt index 385a5f5..ecf6ce6 100644 --- a/lld/MachO/CMakeLists.txt +++ b/lld/MachO/CMakeLists.txt @@ -40,6 +40,7 @@ add_lld_library(lldMachO BinaryFormat BitReader BitWriter + CGData Core DebugInfoDWARF Demangle diff --git a/lld/MachO/Config.h b/lld/MachO/Config.h index 5fca3f1..8f6da63 100644 --- a/lld/MachO/Config.h +++ b/lld/MachO/Config.h @@ -210,6 +210,7 @@ struct Configuration { std::vector<SectionAlign> sectionAlignments; std::vector<SegmentProtection> segmentProtections; bool ltoDebugPassManager = false; + llvm::StringRef codegenDataGeneratePath; bool csProfileGenerate = false; llvm::StringRef csProfilePath; bool pgoWarnMismatch; diff --git a/lld/MachO/Driver.cpp b/lld/MachO/Driver.cpp index 73b8312..ab4abb1 100644 --- a/lld/MachO/Driver.cpp +++ b/lld/MachO/Driver.cpp @@ -36,6 +36,7 @@ #include "llvm/ADT/StringRef.h" #include "llvm/BinaryFormat/MachO.h" #include "llvm/BinaryFormat/Magic.h" +#include "llvm/CGData/CodeGenDataWriter.h" #include "llvm/Config/llvm-config.h" #include "llvm/LTO/LTO.h" #include "llvm/Object/Archive.h" @@ -949,7 +950,6 @@ static void parseClangOption(StringRef opt, const Twine &msg) { const char *argv[] = {"lld", opt.data()}; if (cl::ParseCommandLineOptions(2, argv, "", &os)) return; - os.flush(); error(msg + ": " + StringRef(err).trim()); } @@ -1322,6 +1322,37 @@ static void gatherInputSections() { } } +static void codegenDataGenerate() { + TimeTraceScope timeScope("Generating codegen data"); + + OutlinedHashTreeRecord globalOutlineRecord; + for (ConcatInputSection *isec : inputSections) + if (isec->getSegName() == segment_names::data && + isec->getName() == section_names::outlinedHashTree) { + // Read outlined hash tree from each section. + OutlinedHashTreeRecord localOutlineRecord; + auto *data = isec->data.data(); + localOutlineRecord.deserialize(data); + + // Merge it to the global hash tree. 
+ globalOutlineRecord.merge(localOutlineRecord); + } + + CodeGenDataWriter Writer; + if (!globalOutlineRecord.empty()) + Writer.addRecord(globalOutlineRecord); + + std::error_code EC; + auto fileName = config->codegenDataGeneratePath; + assert(!fileName.empty()); + raw_fd_ostream Output(fileName, EC, sys::fs::OF_None); + if (EC) + error("fail to create " + fileName + ": " + EC.message()); + + if (auto E = Writer.write(Output)) + error("fail to write CGData: " + toString(std::move(E))); +} + static void foldIdenticalLiterals() { TimeTraceScope timeScope("Fold identical literals"); // We always create a cStringSection, regardless of whether dedupLiterals is @@ -1759,6 +1790,8 @@ bool link(ArrayRef<const char *> argsArr, llvm::raw_ostream &stdoutOS, config->ignoreAutoLinkOptions.insert(arg->getValue()); config->strictAutoLink = args.hasArg(OPT_strict_auto_link); config->ltoDebugPassManager = args.hasArg(OPT_lto_debug_pass_manager); + config->codegenDataGeneratePath = + args.getLastArgValue(OPT_codegen_data_generate_path); config->csProfileGenerate = args.hasArg(OPT_cs_profile_generate); config->csProfilePath = args.getLastArgValue(OPT_cs_profile_path); config->pgoWarnMismatch = @@ -2103,6 +2136,10 @@ bool link(ArrayRef<const char *> argsArr, llvm::raw_ostream &stdoutOS, } gatherInputSections(); + + if (!config->codegenDataGeneratePath.empty()) + codegenDataGenerate(); + if (config->callGraphProfileSort) priorityBuilder.extractCallGraphProfile(); diff --git a/lld/MachO/InputSection.h b/lld/MachO/InputSection.h index 4e238d8..7ef0e31 100644 --- a/lld/MachO/InputSection.h +++ b/lld/MachO/InputSection.h @@ -354,6 +354,7 @@ constexpr const char objcMethname[] = "__objc_methname"; constexpr const char objcNonLazyCatList[] = "__objc_nlcatlist"; constexpr const char objcNonLazyClassList[] = "__objc_nlclslist"; constexpr const char objcProtoList[] = "__objc_protolist"; +constexpr const char outlinedHashTree[] = "__llvm_outline"; constexpr const char pageZero[] = "__pagezero"; constexpr const char pointers[] = "__pointers"; constexpr const char rebase[] = "__rebase"; diff --git a/lld/MachO/Options.td b/lld/MachO/Options.td index cbd28bb..70eb7c8 100644 --- a/lld/MachO/Options.td +++ b/lld/MachO/Options.td @@ -162,6 +162,10 @@ def no_objc_category_merging : Flag<["-"], "no_objc_category_merging">, Group<grp_lld>; def lto_debug_pass_manager: Flag<["--"], "lto-debug-pass-manager">, HelpText<"Debug new pass manager">, Group<grp_lld>; +def codegen_data_generate_path : Separate<["--"], "codegen-data-generate-path">, Group<grp_lld>; +def codegen_data_generate_path_eq : Joined<["--"], "codegen-data-generate-path=">, + Alias<!cast<Separate>(codegen_data_generate_path)>, MetaVarName<"<cgdata>">, + HelpText<"Write the CG data to the specified path <cgdata>.">, Group<grp_lld>; def cs_profile_generate: Flag<["--"], "cs-profile-generate">, HelpText<"Perform context sensitive PGO instrumentation">, Group<grp_lld>; def cs_profile_path: Joined<["--"], "cs-profile-path=">, diff --git a/lld/include/lld/Common/CommonLinkerContext.h b/lld/include/lld/Common/CommonLinkerContext.h index 9970dfc..3641bb7 100644 --- a/lld/include/lld/Common/CommonLinkerContext.h +++ b/lld/include/lld/Common/CommonLinkerContext.h @@ -38,7 +38,7 @@ public: llvm::BumpPtrAllocator bAlloc; llvm::StringSaver saver{bAlloc}; - llvm::UniqueStringSaver unique_saver{bAlloc}; + llvm::UniqueStringSaver uniqueSaver{bAlloc}; llvm::DenseMap<void *, SpecificAllocBase *> instances; ErrorHandler e; @@ -57,11 +57,7 @@ bool hasContext(); inline 
llvm::BumpPtrAllocator &bAlloc() { return context().bAlloc; }
inline llvm::StringSaver &saver() { return context().saver; }
-inline llvm::UniqueStringSaver &unique_saver() {
-  // FIXME: Look into other places where duplications are common in saved
-  // strings and unique saver make sense.
-  return context().unique_saver;
-}
+inline llvm::UniqueStringSaver &uniqueSaver() { return context().uniqueSaver; }
} // namespace lld
#endif
diff --git a/lld/test/CMakeLists.txt b/lld/test/CMakeLists.txt
index 5d4a275..abc8ea7 100644
--- a/lld/test/CMakeLists.txt
+++ b/lld/test/CMakeLists.txt
@@ -48,6 +48,7 @@ if (NOT LLD_BUILT_STANDALONE)
   llvm-ar
   llvm-as
   llvm-bcanalyzer
+  llvm-cgdata
   llvm-config
   llvm-cvtres
   llvm-dis
diff --git a/lld/test/MachO/cgdata-generate.s b/lld/test/MachO/cgdata-generate.s
new file mode 100644
index 0000000..174df39
--- /dev/null
+++ b/lld/test/MachO/cgdata-generate.s
@@ -0,0 +1,89 @@
+# UNSUPPORTED: system-windows
+# REQUIRES: aarch64
+
+# RUN: rm -rf %t; split-file %s %t
+
+# Synthesize raw cgdata without the header (24 bytes) from the indexed cgdata.
+# RUN: llvm-cgdata --convert --format binary %t/raw-1.cgtext -o %t/raw-1.cgdata
+# RUN: od -t x1 -j 24 -An %t/raw-1.cgdata | tr -d '\n\r\t' | sed 's/[ ][ ]*/ /g; s/^[ ]*//; s/[ ]*$//; s/[ ]/,0x/g; s/^/0x/' > %t/raw-1-bytes.txt
+# RUN: sed "s/<RAW_BYTES>/$(cat %t/raw-1-bytes.txt)/g" %t/merge-template.s > %t/merge-1.s
+# RUN: llvm-cgdata --convert --format binary %t/raw-2.cgtext -o %t/raw-2.cgdata
+# RUN: od -t x1 -j 24 -An %t/raw-2.cgdata | tr -d '\n\r\t' | sed 's/[ ][ ]*/ /g; s/^[ ]*//; s/[ ]*$//; s/[ ]/,0x/g; s/^/0x/' > %t/raw-2-bytes.txt
+# RUN: sed "s/<RAW_BYTES>/$(cat %t/raw-2-bytes.txt)/g" %t/merge-template.s > %t/merge-2.s
+
+# RUN: llvm-mc -filetype obj -triple arm64-apple-darwin %t/merge-1.s -o %t/merge-1.o
+# RUN: llvm-mc -filetype obj -triple arm64-apple-darwin %t/merge-2.s -o %t/merge-2.o
+# RUN: llvm-mc -filetype obj -triple arm64-apple-darwin %t/main.s -o %t/main.o
+
+# This checks that the codegen data from the linker is identical to the merged codegen data
+# from each object file, which is obtained using the llvm-cgdata tool.
+# RUN: %no-arg-lld -dylib -arch arm64 -platform_version ios 14.0 15.0 -o %t/out \
+# RUN: %t/merge-1.o %t/merge-2.o %t/main.o --codegen-data-generate-path=%t/out-cgdata
+# RUN: llvm-cgdata --merge %t/merge-1.o %t/merge-2.o %t/main.o -o %t/merge-cgdata
+# RUN: diff %t/out-cgdata %t/merge-cgdata
+
+# Merge order doesn't matter. `main.o` is dropped due to missing __llvm_outline.
+# RUN: llvm-cgdata --merge %t/merge-2.o %t/merge-1.o -o %t/merge-cgdata-shuffle
+# RUN: diff %t/out-cgdata %t/merge-cgdata-shuffle
+
+# We can also generate the merged codegen data from the executable that is not dead-stripped.
+# RUN: llvm-objdump -h %t/out | FileCheck %s
+# CHECK: __llvm_outline
+# RUN: llvm-cgdata --merge %t/out -o %t/merge-cgdata-exe
+# RUN: diff %t/merge-cgdata-exe %t/merge-cgdata
+
+# Dead-strip will remove __llvm_outline sections from the final executable.
+# But the codegen data is still correctly produced by the linker.
+# RUN: %no-arg-lld -dylib -arch arm64 -platform_version ios 14.0 15.0 -o %t/out-strip \
+# RUN: %t/merge-1.o %t/merge-2.o %t/main.o -dead_strip --codegen-data-generate-path=%t/out-cgdata-strip
+# RUN: llvm-cgdata --merge %t/merge-1.o %t/merge-2.o %t/main.o -o %t/merge-cgdata-strip
+# RUN: diff %t/out-cgdata-strip %t/merge-cgdata-strip
+# RUN: diff %t/out-cgdata-strip %t/merge-cgdata
+
+# Ensure no __llvm_outline section remains in the executable.
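The test above pins down the contract of the new codegenDataGenerate() pass in lld/MachO/Driver.cpp: deserializing the outlined hash tree from each object's __DATA,__llvm_outline section and merging them must give byte-identical output to `llvm-cgdata --merge` over the same inputs. The real OutlinedHashTreeRecord is a trie of hash sequences; a flat hash-to-count map is enough to illustrate the shape of the global merge (this toy is not the actual CGData API):

#include <cstdint>
#include <map>

using HashTree = std::map<uint64_t, unsigned>; // hash -> terminal count

// Accumulate one object's local tree into the link-wide record, the way
// codegenDataGenerate() folds each section's record into
// globalOutlineRecord above.
void mergeInto(HashTree &global, const HashTree &local) {
  for (const auto &[hash, count] : local)
    global[hash] += count; // shared nodes accumulate; new nodes are added
}

Because the merge is commutative, object order does not matter, which is exactly what the merge-cgdata-shuffle check verifies.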
+# RUN: llvm-objdump -h %t/out-strip | FileCheck %s --check-prefix=STRIP +# STRIP-NOT: __llvm_outline + +#--- raw-1.cgtext +:outlined_hash_tree +0: + Hash: 0x0 + Terminals: 0 + SuccessorIds: [ 1 ] +1: + Hash: 0x1 + Terminals: 0 + SuccessorIds: [ 2 ] +2: + Hash: 0x2 + Terminals: 4 + SuccessorIds: [ ] +... + +#--- raw-2.cgtext +:outlined_hash_tree +0: + Hash: 0x0 + Terminals: 0 + SuccessorIds: [ 1 ] +1: + Hash: 0x1 + Terminals: 0 + SuccessorIds: [ 2 ] +2: + Hash: 0x3 + Terminals: 5 + SuccessorIds: [ ] +... + +#--- merge-template.s +.section __DATA,__llvm_outline +_data: +.byte <RAW_BYTES> + +#--- main.s +.globl _main + +.text +_main: + ret diff --git a/lld/test/lit.cfg.py b/lld/test/lit.cfg.py index d309c2a..859094e 100644 --- a/lld/test/lit.cfg.py +++ b/lld/test/lit.cfg.py @@ -40,6 +40,7 @@ llvm_config.use_lld() tool_patterns = [ "llc", "llvm-as", + "llvm-cgdata", "llvm-mc", "llvm-nm", "llvm-objdump", diff --git a/lld/wasm/OutputSections.cpp b/lld/wasm/OutputSections.cpp index 3974146..b0b2446 100644 --- a/lld/wasm/OutputSections.cpp +++ b/lld/wasm/OutputSections.cpp @@ -42,7 +42,6 @@ void OutputSection::createHeader(size_t bodySize) { debugWrite(os.tell(), "section type [" + getSectionName() + "]"); encodeULEB128(type, os); writeUleb128(os, bodySize, "section size"); - os.flush(); log("createHeader: " + toString(*this) + " body=" + Twine(bodySize) + " total=" + Twine(getSize())); } @@ -50,7 +49,6 @@ void OutputSection::createHeader(size_t bodySize) { void CodeSection::finalizeContents() { raw_string_ostream os(codeSectionHeader); writeUleb128(os, functions.size(), "function count"); - os.flush(); bodySize = codeSectionHeader.size(); for (InputFunction *func : functions) { @@ -112,7 +110,6 @@ void DataSection::finalizeContents() { "output segments should have been combined by now"); writeUleb128(os, segmentCount, "data segment count"); - os.flush(); bodySize = dataSectionHeader.size(); bool is64 = config->is64.value_or(false); @@ -147,7 +144,6 @@ void DataSection::finalizeContents() { } } writeUleb128(os, segment->size, "segment size"); - os.flush(); segment->sectionOffset = bodySize; bodySize += segment->header.size() + segment->size; @@ -245,7 +241,6 @@ void CustomSection::finalizeContents() { raw_string_ostream os(nameData); encodeULEB128(name.size(), os); os << name; - os.flush(); for (InputChunk *section : inputSections) { assert(!section->discarded); diff --git a/lld/wasm/SyntheticSections.cpp b/lld/wasm/SyntheticSections.cpp index 72d08b8..a3bc90c 100644 --- a/lld/wasm/SyntheticSections.cpp +++ b/lld/wasm/SyntheticSections.cpp @@ -38,7 +38,6 @@ public: explicit SubSection(uint32_t type) : type(type) {} void writeTo(raw_ostream &to) { - os.flush(); writeUleb128(to, type, "subsection type"); writeUleb128(to, body.size(), "subsection size"); to.write(body.data(), body.size()); diff --git a/lld/wasm/SyntheticSections.h b/lld/wasm/SyntheticSections.h index 1147996..10183e9 100644 --- a/lld/wasm/SyntheticSections.h +++ b/lld/wasm/SyntheticSections.h @@ -56,7 +56,6 @@ public: void finalizeContents() override { writeBody(); - bodyOutputStream.flush(); createHeader(body.size()); } diff --git a/lld/wasm/Writer.cpp b/lld/wasm/Writer.cpp index 6beef81..681f6a1 100644 --- a/lld/wasm/Writer.cpp +++ b/lld/wasm/Writer.cpp @@ -1875,7 +1875,6 @@ void Writer::createHeader() { raw_string_ostream os(header); writeBytes(os, WasmMagic, sizeof(WasmMagic), "wasm magic"); writeU32(os, WasmVersion, "wasm version"); - os.flush(); fileSize += header.size(); } diff --git a/lldb/examples/synthetic/libcxx.py 
b/lldb/examples/synthetic/libcxx.py index 474aaa4..b078a4e 100644 --- a/lldb/examples/synthetic/libcxx.py +++ b/lldb/examples/synthetic/libcxx.py @@ -721,6 +721,12 @@ class stddeque_SynthProvider: def update(self): logger = lldb.formatters.Logger.Logger() try: + has_compressed_pair_layout = True + alloc_valobj = self.valobj.GetChildMemberWithName("__alloc_") + size_valobj = self.valobj.GetChildMemberWithName("__size_") + if alloc_valobj.IsValid() and size_valobj.IsValid(): + has_compressed_pair_layout = False + # A deque is effectively a two-dim array, with fixed width. # 'map' contains pointers to the rows of this array. The # full memory area allocated by the deque is delimited @@ -734,9 +740,13 @@ class stddeque_SynthProvider: # variable tells which element in this NxM array is the 0th # one, and the 'size' element gives the number of elements # in the deque. - count = self._get_value_of_compressed_pair( - self.valobj.GetChildMemberWithName("__size_") - ) + if has_compressed_pair_layout: + count = self._get_value_of_compressed_pair( + self.valobj.GetChildMemberWithName("__size_") + ) + else: + count = size_valobj.GetValueAsUnsigned(0) + # give up now if we cant access memory reliably if self.block_size < 0: logger.write("block_size < 0") @@ -748,9 +758,15 @@ class stddeque_SynthProvider: self.map_begin = map_.GetChildMemberWithName("__begin_") map_begin = self.map_begin.GetValueAsUnsigned(0) map_end = map_.GetChildMemberWithName("__end_").GetValueAsUnsigned(0) - map_endcap = self._get_value_of_compressed_pair( - map_.GetChildMemberWithName("__end_cap_") - ) + + if has_compressed_pair_layout: + map_endcap = self._get_value_of_compressed_pair( + map_.GetChildMemberWithName("__end_cap_") + ) + else: + map_endcap = map_.GetChildMemberWithName( + "__end_cap_" + ).GetValueAsUnsigned(0) # check consistency if not map_first <= map_begin <= map_end <= map_endcap: diff --git a/lldb/include/lldb/Utility/Instrumentation.h b/lldb/include/lldb/Utility/Instrumentation.h index 4a9ac81..1a86bfb 100644 --- a/lldb/include/lldb/Utility/Instrumentation.h +++ b/lldb/include/lldb/Utility/Instrumentation.h @@ -70,7 +70,7 @@ template <typename... Ts> inline std::string stringify_args(const Ts &...ts) { std::string buffer; llvm::raw_string_ostream ss(buffer); stringify_helper(ss, ts...); - return ss.str(); + return buffer; } /// RAII object for instrumenting LLDB API functions. diff --git a/lldb/packages/Python/lldbsuite/test/bench.py b/lldb/packages/Python/lldbsuite/test/bench.py deleted file mode 100644 index 1a11b3e..0000000 --- a/lldb/packages/Python/lldbsuite/test/bench.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env python - -""" -A simple bench runner which delegates to the ./dotest.py test driver to run the -benchmarks defined in the list named 'benches'. - -You need to hand edit 'benches' to modify/change the command lines passed to the -test driver. - -Use the following to get only the benchmark results in your terminal output: - - ./bench.py -e /Volumes/data/lldb/svn/regression/build/Debug/lldb -x '-F Driver::MainLoop()' 2>&1 | grep -P '^lldb.*benchmark:' -""" - -import os -from optparse import OptionParser - -# dotest.py invocation with no '-e exe-path' uses lldb as the inferior program, -# unless there is a mentioning of custom executable program. -benches = [ - # Measure startup delays creating a target, setting a breakpoint, and run - # to breakpoint stop. - "./dotest.py -v +b %E %X -n -p TestStartupDelays.py", - # Measure 'frame variable' response after stopping at a breakpoint. 
- "./dotest.py -v +b %E %X -n -p TestFrameVariableResponse.py", - # Measure stepping speed after stopping at a breakpoint. - "./dotest.py -v +b %E %X -n -p TestSteppingSpeed.py", - # Measure expression cmd response with a simple custom executable program. - "./dotest.py +b -n -p TestExpressionCmd.py", - # Attach to a spawned process then run disassembly benchmarks. - "./dotest.py -v +b -n %E -p TestDoAttachThenDisassembly.py", -] - - -def main(): - """Read the items from 'benches' and run the command line one by one.""" - parser = OptionParser( - usage="""\ -%prog [options] -Run the standard benchmarks defined in the list named 'benches'.\ -""" - ) - parser.add_option( - "-e", - "--executable", - type="string", - action="store", - dest="exe", - help="The target program launched by lldb.", - ) - parser.add_option( - "-x", - "--breakpoint-spec", - type="string", - action="store", - dest="break_spec", - help="The lldb breakpoint spec for the target program.", - ) - - # Parses the options, if any. - opts, args = parser.parse_args() - - print("Starting bench runner....") - - for item in benches: - command = item.replace("%E", '-e "%s"' % opts.exe if opts.exe else "") - command = command.replace( - "%X", '-x "%s"' % opts.break_spec if opts.break_spec else "" - ) - print("Running %s" % (command)) - os.system(command) - - print("Bench runner done.") - - -if __name__ == "__main__": - main() diff --git a/lldb/packages/Python/lldbsuite/test/decorators.py b/lldb/packages/Python/lldbsuite/test/decorators.py index 834f01a..34319e2 100644 --- a/lldb/packages/Python/lldbsuite/test/decorators.py +++ b/lldb/packages/Python/lldbsuite/test/decorators.py @@ -426,18 +426,6 @@ def add_test_categories(cat): return impl -def benchmarks_test(func): - """Decorate the item as a benchmarks test.""" - - def should_skip_benchmarks_test(): - return "benchmarks test" - - # Mark this function as such to separate them from the regular tests. - result = skipTestIfFn(should_skip_benchmarks_test)(func) - result.__benchmarks_test__ = True - return result - - def no_debug_info_test(func): """Decorate the item as a test what don't use any debug info. If this annotation is specified then the test runner won't generate a separate test for each debug info format.""" diff --git a/lldb/packages/Python/lldbsuite/test/make/libcxx-simulators-common/compressed_pair.h b/lldb/packages/Python/lldbsuite/test/make/libcxx-simulators-common/compressed_pair.h index 026e718..89eafce 100644 --- a/lldb/packages/Python/lldbsuite/test/make/libcxx-simulators-common/compressed_pair.h +++ b/lldb/packages/Python/lldbsuite/test/make/libcxx-simulators-common/compressed_pair.h @@ -7,7 +7,7 @@ namespace std { namespace __lldb { -// Post-c88580c layout +#if COMPRESSED_PAIR_REV == 0 // Post-c88580c layout struct __value_init_tag {}; struct __default_init_tag {}; @@ -52,6 +52,53 @@ public: _T1 &first() { return static_cast<_Base1 &>(*this).__get(); } }; +#elif COMPRESSED_PAIR_REV == 1 +// From libc++ datasizeof.h +template <class _Tp> struct _FirstPaddingByte { + [[no_unique_address]] _Tp __v_; + char __first_padding_byte_; +}; + +template <class _Tp> +inline const size_t __datasizeof_v = + __builtin_offsetof(_FirstPaddingByte<_Tp>, __first_padding_byte_); + +template <class _Tp> +struct __lldb_is_final : public integral_constant<bool, __is_final(_Tp)> {}; + +template <class _ToPad> class __compressed_pair_padding { + char __padding_[((is_empty<_ToPad>::value && + !__lldb_is_final<_ToPad>::value) || + is_reference<_ToPad>::value) + ? 
0 + : sizeof(_ToPad) - __datasizeof_v<_ToPad>]; +}; + +#define _LLDB_COMPRESSED_PAIR(T1, Initializer1, T2, Initializer2) \ + [[__gnu__::__aligned__(alignof(T2))]] [[no_unique_address]] T1 Initializer1; \ + [[no_unique_address]] __compressed_pair_padding<T1> __padding1_; \ + [[no_unique_address]] T2 Initializer2; \ + [[no_unique_address]] __compressed_pair_padding<T2> __padding2_; + +#define _LLDB_COMPRESSED_TRIPLE(T1, Initializer1, T2, Initializer2, T3, \ + Initializer3) \ + [[using __gnu__: __aligned__(alignof(T2)), \ + __aligned__(alignof(T3))]] [[no_unique_address]] T1 Initializer1; \ + [[no_unique_address]] __compressed_pair_padding<T1> __padding1_; \ + [[no_unique_address]] T2 Initializer2; \ + [[no_unique_address]] __compressed_pair_padding<T2> __padding2_; \ + [[no_unique_address]] T3 Initializer3; \ + [[no_unique_address]] __compressed_pair_padding<T3> __padding3_; +#elif COMPRESSED_PAIR_REV == 2 +#define _LLDB_COMPRESSED_PAIR(T1, Name1, T2, Name2) \ + [[no_unique_address]] T1 Name1; \ + [[no_unique_address]] T2 Name2 + +#define _LLDB_COMPRESSED_TRIPLE(T1, Name1, T2, Name2, T3, Name3) \ + [[no_unique_address]] T1 Name1; \ + [[no_unique_address]] T2 Name2; \ + [[no_unique_address]] T3 Name3 +#endif } // namespace __lldb } // namespace std diff --git a/lldb/source/Breakpoint/Breakpoint.cpp b/lldb/source/Breakpoint/Breakpoint.cpp index 3268ce0..54ebafc3 100644 --- a/lldb/source/Breakpoint/Breakpoint.cpp +++ b/lldb/source/Breakpoint/Breakpoint.cpp @@ -1127,7 +1127,7 @@ json::Value Breakpoint::GetStatistics() { llvm::raw_string_ostream ss(buffer); json::OStream json_os(ss); bp_data_sp->Serialize(json_os); - if (auto expected_value = llvm::json::parse(ss.str())) { + if (auto expected_value = llvm::json::parse(buffer)) { bp.try_emplace("details", std::move(*expected_value)); } else { std::string details_error = toString(expected_value.takeError()); diff --git a/lldb/source/Commands/CommandObjectLog.cpp b/lldb/source/Commands/CommandObjectLog.cpp index 9eb68dd..5fb2dfa 100644 --- a/lldb/source/Commands/CommandObjectLog.cpp +++ b/lldb/source/Commands/CommandObjectLog.cpp @@ -204,7 +204,7 @@ protected: channel, args.GetArgumentArrayRef(), log_file, m_options.log_options, m_options.buffer_size.GetCurrentValue(), m_options.handler, error_stream); - result.GetErrorStream() << error_stream.str(); + result.GetErrorStream() << error; if (success) result.SetStatus(eReturnStatusSuccessFinishNoResult); @@ -273,7 +273,7 @@ protected: if (Log::DisableLogChannel(channel, args.GetArgumentArrayRef(), error_stream)) result.SetStatus(eReturnStatusSuccessFinishNoResult); - result.GetErrorStream() << error_stream.str(); + result.GetErrorStream() << error; } } }; @@ -313,7 +313,7 @@ protected: if (success) result.SetStatus(eReturnStatusSuccessFinishResult); } - result.GetOutputStream() << output_stream.str(); + result.GetOutputStream() << output; } }; class CommandObjectLogDump : public CommandObjectParsed { @@ -404,7 +404,7 @@ protected: result.SetStatus(eReturnStatusSuccessFinishNoResult); } else { result.SetStatus(eReturnStatusFailed); - result.GetErrorStream() << error_stream.str(); + result.GetErrorStream() << error; } } diff --git a/lldb/source/Commands/CommandObjectRegexCommand.cpp b/lldb/source/Commands/CommandObjectRegexCommand.cpp index f638d70..7e27915 100644 --- a/lldb/source/Commands/CommandObjectRegexCommand.cpp +++ b/lldb/source/Commands/CommandObjectRegexCommand.cpp @@ -51,7 +51,7 @@ llvm::Expected<std::string> CommandObjectRegexCommand::SubstituteVariables( output << part; } - return 
output.str(); + return buffer; } void CommandObjectRegexCommand::DoExecute(llvm::StringRef command, diff --git a/lldb/source/Core/Module.cpp b/lldb/source/Core/Module.cpp index 8595a221..88cc957 100644 --- a/lldb/source/Core/Module.cpp +++ b/lldb/source/Core/Module.cpp @@ -1626,7 +1626,7 @@ uint32_t Module::Hash() { const auto mtime = llvm::sys::toTimeT(m_object_mod_time); if (mtime > 0) id_strm << mtime; - return llvm::djbHash(id_strm.str()); + return llvm::djbHash(identifier); } std::string Module::GetCacheKey() { @@ -1636,7 +1636,7 @@ std::string Module::GetCacheKey() { if (m_object_name) strm << '(' << m_object_name << ')'; strm << '-' << llvm::format_hex(Hash(), 10); - return strm.str(); + return key; } DataFileCache *Module::GetIndexCache() { diff --git a/lldb/source/Expression/IRExecutionUnit.cpp b/lldb/source/Expression/IRExecutionUnit.cpp index d2f2ee2..15ca2dd 100644 --- a/lldb/source/Expression/IRExecutionUnit.cpp +++ b/lldb/source/Expression/IRExecutionUnit.cpp @@ -261,8 +261,6 @@ void IRExecutionUnit::GetRunnableInfo(Status &error, lldb::addr_t &func_addr, m_module->print(oss, nullptr); - oss.flush(); - LLDB_LOGF(log, "Module being sent to JIT: \n%s", s.c_str()); } diff --git a/lldb/source/Expression/IRInterpreter.cpp b/lldb/source/Expression/IRInterpreter.cpp index 593258f..4909310 100644 --- a/lldb/source/Expression/IRInterpreter.cpp +++ b/lldb/source/Expression/IRInterpreter.cpp @@ -49,7 +49,6 @@ static std::string PrintValue(const Value *value, bool truncate = false) { std::string s; raw_string_ostream rso(s); value->print(rso); - rso.flush(); if (truncate) s.resize(s.length() - 1); @@ -66,7 +65,6 @@ static std::string PrintType(const Type *type, bool truncate = false) { std::string s; raw_string_ostream rso(s); type->print(rso); - rso.flush(); if (truncate) s.resize(s.length() - 1); return s; @@ -698,8 +696,6 @@ bool IRInterpreter::Interpret(llvm::Module &module, llvm::Function &function, module.print(oss, nullptr); - oss.flush(); - LLDB_LOGF(log, "Module as passed in to IRInterpreter::Interpret: \n\"%s\"", s.c_str()); } diff --git a/lldb/source/Host/windows/PipeWindows.cpp b/lldb/source/Host/windows/PipeWindows.cpp index d79dc3c..21e30f0 100644 --- a/lldb/source/Host/windows/PipeWindows.cpp +++ b/lldb/source/Host/windows/PipeWindows.cpp @@ -74,7 +74,6 @@ Status PipeWindows::CreateNew(bool child_process_inherit) { std::string pipe_name; llvm::raw_string_ostream pipe_name_stream(pipe_name); pipe_name_stream << "lldb.pipe." << ::GetCurrentProcessId() << "." 
<< serial; - pipe_name_stream.flush(); return CreateNew(pipe_name.c_str(), child_process_inherit); } diff --git a/lldb/source/Interpreter/Options.cpp b/lldb/source/Interpreter/Options.cpp index 27eda289..b8a3f68a 100644 --- a/lldb/source/Interpreter/Options.cpp +++ b/lldb/source/Interpreter/Options.cpp @@ -923,7 +923,7 @@ static std::string BuildShortOptions(const Option *long_options) { } } } - return std::move(sstr.str()); + return storage; } llvm::Expected<Args> Options::ParseAlias(const Args &args, diff --git a/lldb/source/Plugins/Disassembler/LLVMC/DisassemblerLLVMC.cpp b/lldb/source/Plugins/Disassembler/LLVMC/DisassemblerLLVMC.cpp index 1628107..31edd8d4 100644 --- a/lldb/source/Plugins/Disassembler/LLVMC/DisassemblerLLVMC.cpp +++ b/lldb/source/Plugins/Disassembler/LLVMC/DisassemblerLLVMC.cpp @@ -1366,8 +1366,6 @@ void DisassemblerLLVMC::MCDisasmInstance::PrintMCInst( *m_subtarget_info_up, inst_stream); m_instr_printer_up->setCommentStream(llvm::nulls()); - comments_stream.flush(); - static std::string g_newlines("\r\n"); for (size_t newline_pos = 0; diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp index 90f26de..2fe3c04 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp @@ -984,7 +984,6 @@ private: ToInsert += "("; raw_string_ostream OS(Description); F->print(OS, m_desc_policy, false); - OS.flush(); } else if (const VarDecl *V = dyn_cast<VarDecl>(D)) { Description = V->getType().getAsString(m_desc_policy); } else if (const FieldDecl *F = dyn_cast<FieldDecl>(D)) { @@ -1358,7 +1357,6 @@ bool ClangExpressionParser::RewriteExpression( llvm::raw_string_ostream out_stream(fixed_expression); main_file_buffer.write(out_stream); - out_stream.flush(); diagnostic_manager.SetFixedExpression(fixed_expression); return true; diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp index 024fc75..a43701c 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp @@ -147,7 +147,6 @@ void StoringDiagnosticConsumer::HandleDiagnostic( // Print the diagnostic to m_output. m_output.clear(); m_diag_printer->HandleDiagnostic(DiagLevel, info); - m_os->flush(); // Store the diagnostic for later. 
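// (m_os is an unbuffered raw_string_ostream over m_output, so the printed
// text is already in m_output; nothing is left to flush before copying it.)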
m_diagnostics.push_back(IDAndDiagnostic(DiagLevel, m_output)); diff --git a/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp b/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp index defd72b..45bdc72 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp @@ -94,7 +94,6 @@ static std::string PrintValue(llvm::Value *V, bool truncate = false) { std::string s; raw_string_ostream rso(s); V->print(rso); - rso.flush(); if (truncate) s.resize(s.length() - 1); return s; @@ -553,8 +552,6 @@ bool IRDynamicChecks::runOnModule(llvm::Module &M) { M.print(oss, nullptr); - oss.flush(); - LLDB_LOGF(log, "Module after dynamic checks: \n%s", s.c_str()); } diff --git a/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp b/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp index 34461da..3764668 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp @@ -86,7 +86,6 @@ static std::string PrintValue(const Value *value, bool truncate = false) { if (value) { raw_string_ostream rso(s); value->print(rso); - rso.flush(); if (truncate) s.resize(s.length() - 1); } @@ -97,7 +96,6 @@ static std::string PrintType(const llvm::Type *type, bool truncate = false) { std::string s; raw_string_ostream rso(s); type->print(rso); - rso.flush(); if (truncate) s.resize(s.length() - 1); return s; @@ -244,7 +242,6 @@ bool IRForTarget::CreateResultVariable(llvm::Function &llvm_function) { std::string decl_desc_str; raw_string_ostream decl_desc_stream(decl_desc_str); result_decl->print(decl_desc_stream); - decl_desc_stream.flush(); LLDB_LOG(log, "Found result decl: \"{0}\"", decl_desc_str); } @@ -1616,8 +1613,6 @@ bool IRForTarget::runOnModule(Module &llvm_module) { m_module->print(oss, nullptr); - oss.flush(); - LLDB_LOG(log, "Module as passed in to IRForTarget: \n\"{0}\"", s); } @@ -1663,8 +1658,6 @@ bool IRForTarget::runOnModule(Module &llvm_module) { m_module->print(oss, nullptr); - oss.flush(); - LLDB_LOG(log, "Module after creating the result variable: \n\"{0}\"", s); } @@ -1762,8 +1755,6 @@ bool IRForTarget::runOnModule(Module &llvm_module) { m_module->print(oss, nullptr); - oss.flush(); - LLDB_LOG(log, "Module after preparing for execution: \n\"{0}\"", s); } diff --git a/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp b/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp index feaa51a..7d3b241 100644 --- a/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp +++ b/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp @@ -27,6 +27,7 @@ #include "Plugins/LanguageRuntime/CPlusPlus/CPPLanguageRuntime.h" #include "Plugins/TypeSystem/Clang/TypeSystemClang.h" #include "lldb/lldb-enumerations.h" +#include "lldb/lldb-forward.h" #include <optional> #include <tuple> @@ -34,6 +35,32 @@ using namespace lldb; using namespace lldb_private; using namespace lldb_private::formatters; +static void consumeInlineNamespace(llvm::StringRef &name) { + // Delete past an inline namespace, if any: __[a-zA-Z0-9_]+:: + auto scratch = name; + if (scratch.consume_front("__") && std::isalnum(scratch[0])) { + scratch = scratch.drop_while([](char c) { return std::isalnum(c); }); + if (scratch.consume_front("::")) { + // Successfully consumed a namespace. 
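+      // e.g. a name of "__1::pair<int, int>" becomes "pair<int, int>".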
+ name = scratch; + } + } +} + +bool lldb_private::formatters::isOldCompressedPairLayout( + ValueObject &pair_obj) { + return isStdTemplate(pair_obj.GetTypeName(), "__compressed_pair"); +} + +bool lldb_private::formatters::isStdTemplate(ConstString type_name, + llvm::StringRef type) { + llvm::StringRef name = type_name.GetStringRef(); + // The type name may be prefixed with `std::__<inline-namespace>::`. + if (name.consume_front("std::")) + consumeInlineNamespace(name); + return name.consume_front(type) && name.starts_with("<"); +} + lldb::ValueObjectSP lldb_private::formatters::GetChildMemberWithName( ValueObject &obj, llvm::ArrayRef<ConstString> alternative_names) { for (ConstString name : alternative_names) { @@ -53,7 +80,7 @@ lldb_private::formatters::GetFirstValueOfLibCXXCompressedPair( if (first_child) value = first_child->GetChildMemberWithName("__value_"); if (!value) { - // pre-r300140 member name + // pre-c88580c member name value = pair.GetChildMemberWithName("__first_"); } return value; @@ -70,7 +97,7 @@ lldb_private::formatters::GetSecondValueOfLibCXXCompressedPair( } } if (!value) { - // pre-r300140 member name + // pre-c88580c member name value = pair.GetChildMemberWithName("__second_"); } return value; @@ -176,7 +203,9 @@ bool lldb_private::formatters::LibcxxUniquePointerSummaryProvider( if (!ptr_sp) return false; - ptr_sp = GetFirstValueOfLibCXXCompressedPair(*ptr_sp); + if (isOldCompressedPairLayout(*ptr_sp)) + ptr_sp = GetFirstValueOfLibCXXCompressedPair(*ptr_sp); + if (!ptr_sp) return false; @@ -363,13 +392,22 @@ lldb_private::formatters::LibcxxUniquePtrSyntheticFrontEnd::Update() { // Retrieve the actual pointer and the deleter, and clone them to give them // user-friendly names. - ValueObjectSP value_pointer_sp = GetFirstValueOfLibCXXCompressedPair(*ptr_sp); - if (value_pointer_sp) - m_value_ptr_sp = value_pointer_sp->Clone(ConstString("pointer")); + if (isOldCompressedPairLayout(*ptr_sp)) { + if (ValueObjectSP value_pointer_sp = + GetFirstValueOfLibCXXCompressedPair(*ptr_sp)) + m_value_ptr_sp = value_pointer_sp->Clone(ConstString("pointer")); + + if (ValueObjectSP deleter_sp = + GetSecondValueOfLibCXXCompressedPair(*ptr_sp)) + m_deleter_sp = deleter_sp->Clone(ConstString("deleter")); + } else { + m_value_ptr_sp = ptr_sp->Clone(ConstString("pointer")); - ValueObjectSP deleter_sp = GetSecondValueOfLibCXXCompressedPair(*ptr_sp); - if (deleter_sp) - m_deleter_sp = deleter_sp->Clone(ConstString("deleter")); + if (ValueObjectSP deleter_sp = + valobj_sp->GetChildMemberWithName("__deleter_")) + if (deleter_sp->GetNumChildrenIgnoringErrors() > 0) + m_deleter_sp = deleter_sp->Clone(ConstString("deleter")); + } return lldb::ChildCacheState::eRefetch; } @@ -407,24 +445,27 @@ namespace { enum class StringLayout { CSD, DSC }; } +static ValueObjectSP ExtractLibCxxStringData(ValueObject &valobj) { + if (auto rep_sp = valobj.GetChildMemberWithName("__rep_")) + return rep_sp; + + ValueObjectSP valobj_r_sp = valobj.GetChildMemberWithName("__r_"); + if (!valobj_r_sp || !valobj_r_sp->GetError().Success()) + return nullptr; + + if (!isOldCompressedPairLayout(*valobj_r_sp)) + return nullptr; + + return GetFirstValueOfLibCXXCompressedPair(*valobj_r_sp); +} + /// Determine the size in bytes of \p valobj (a libc++ std::string object) and /// extract its data payload. Return the size + payload pair. // TODO: Support big-endian architectures. 
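// A simplified sketch of the two layouts ExtractLibCxxStringData handles
// (member names are libc++'s; the struct names here are illustrative only):
//
//   struct StringPostRework { __rep __rep_; /* ... */ };
//   struct StringPreRework { __compressed_pair<__rep, allocator> __r_; };
//
// In the old layout the representation sits in the compressed pair's first
// base class under __value_, which GetFirstValueOfLibCXXCompressedPair
// unwraps.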
static std::optional<std::pair<uint64_t, ValueObjectSP>> ExtractLibcxxStringInfo(ValueObject &valobj) { - ValueObjectSP valobj_r_sp = valobj.GetChildMemberWithName("__r_"); - if (!valobj_r_sp || !valobj_r_sp->GetError().Success()) - return {}; - - // __r_ is a compressed_pair of the actual data and the allocator. The data we - // want is in the first base class. - ValueObjectSP valobj_r_base_sp = valobj_r_sp->GetChildAtIndex(0); - if (!valobj_r_base_sp) - return {}; - - ValueObjectSP valobj_rep_sp = - valobj_r_base_sp->GetChildMemberWithName("__value_"); - if (!valobj_rep_sp) + ValueObjectSP valobj_rep_sp = ExtractLibCxxStringData(valobj); + if (!valobj_rep_sp || !valobj_rep_sp->GetError().Success()) return {}; ValueObjectSP l = valobj_rep_sp->GetChildMemberWithName("__l"); diff --git a/lldb/source/Plugins/Language/CPlusPlus/LibCxx.h b/lldb/source/Plugins/Language/CPlusPlus/LibCxx.h index 5307b52..dad0336 100644 --- a/lldb/source/Plugins/Language/CPlusPlus/LibCxx.h +++ b/lldb/source/Plugins/Language/CPlusPlus/LibCxx.h @@ -25,7 +25,8 @@ GetChildMemberWithName(ValueObject &obj, lldb::ValueObjectSP GetFirstValueOfLibCXXCompressedPair(ValueObject &pair); lldb::ValueObjectSP GetSecondValueOfLibCXXCompressedPair(ValueObject &pair); - +bool isOldCompressedPairLayout(ValueObject &pair_obj); +bool isStdTemplate(ConstString type_name, llvm::StringRef type); bool LibcxxStringSummaryProviderASCII( ValueObject &valobj, Stream &stream, diff --git a/lldb/source/Plugins/Language/CPlusPlus/LibCxxList.cpp b/lldb/source/Plugins/Language/CPlusPlus/LibCxxList.cpp index d7cfeb3..4479f59 100644 --- a/lldb/source/Plugins/Language/CPlusPlus/LibCxxList.cpp +++ b/lldb/source/Plugins/Language/CPlusPlus/LibCxxList.cpp @@ -17,6 +17,7 @@ #include "lldb/Utility/Endian.h" #include "lldb/Utility/Status.h" #include "lldb/Utility/Stream.h" +#include "lldb/lldb-enumerations.h" using namespace lldb; using namespace lldb_private; @@ -294,12 +295,17 @@ lldb::ChildCacheState ForwardListFrontEnd::Update() { ValueObjectSP impl_sp(m_backend.GetChildMemberWithName("__before_begin_")); if (!impl_sp) - return lldb::ChildCacheState::eRefetch; - impl_sp = GetFirstValueOfLibCXXCompressedPair(*impl_sp); + return ChildCacheState::eRefetch; + + if (isOldCompressedPairLayout(*impl_sp)) + impl_sp = GetFirstValueOfLibCXXCompressedPair(*impl_sp); + if (!impl_sp) - return lldb::ChildCacheState::eRefetch; + return ChildCacheState::eRefetch; + m_head = impl_sp->GetChildMemberWithName("__next_").get(); - return lldb::ChildCacheState::eRefetch; + + return ChildCacheState::eRefetch; } ListFrontEnd::ListFrontEnd(lldb::ValueObjectSP valobj_sp) @@ -313,34 +319,42 @@ llvm::Expected<uint32_t> ListFrontEnd::CalculateNumChildren() { return m_count; if (!m_head || !m_tail || m_node_address == 0) return 0; - ValueObjectSP size_alloc(m_backend.GetChildMemberWithName("__size_alloc_")); - if (size_alloc) { - ValueObjectSP value = GetFirstValueOfLibCXXCompressedPair(*size_alloc); - if (value) { - m_count = value->GetValueAsUnsigned(UINT32_MAX); - } + + ValueObjectSP size_node_sp(m_backend.GetChildMemberWithName("__size_")); + if (!size_node_sp) { + size_node_sp = m_backend.GetChildMemberWithName( + "__size_alloc_"); // pre-compressed_pair rework + + if (!isOldCompressedPairLayout(*size_node_sp)) + return llvm::createStringError("Unexpected std::list layout: expected " + "old __compressed_pair layout."); + + size_node_sp = GetFirstValueOfLibCXXCompressedPair(*size_node_sp); } - if (m_count != UINT32_MAX) { + + if (size_node_sp) + m_count = 
size_node_sp->GetValueAsUnsigned(UINT32_MAX); + + if (m_count != UINT32_MAX) return m_count; - } else { - uint64_t next_val = m_head->GetValueAsUnsigned(0); - uint64_t prev_val = m_tail->GetValueAsUnsigned(0); - if (next_val == 0 || prev_val == 0) - return 0; - if (next_val == m_node_address) - return 0; - if (next_val == prev_val) - return 1; - uint64_t size = 2; - ListEntry current(m_head); - while (current.next() && current.next().value() != m_node_address) { - size++; - current = current.next(); - if (size > m_list_capping_size) - break; - } - return m_count = (size - 1); + + uint64_t next_val = m_head->GetValueAsUnsigned(0); + uint64_t prev_val = m_tail->GetValueAsUnsigned(0); + if (next_val == 0 || prev_val == 0) + return 0; + if (next_val == m_node_address) + return 0; + if (next_val == prev_val) + return 1; + uint64_t size = 2; + ListEntry current(m_head); + while (current.next() && current.next().value() != m_node_address) { + size++; + current = current.next(); + if (size > m_list_capping_size) + break; } + return m_count = (size - 1); } lldb::ValueObjectSP ListFrontEnd::GetChildAtIndex(uint32_t idx) { diff --git a/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp b/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp index 5106a63..af3e41b 100644 --- a/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp +++ b/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp @@ -202,6 +202,8 @@ public: size_t GetIndexOfChildWithName(ConstString name) override; private: + llvm::Expected<uint32_t> CalculateNumChildrenForOldCompressedPairLayout(); + /// Returns the ValueObject for the __tree_node type that /// holds the key/value pair of the node at index \ref idx. /// @@ -254,6 +256,27 @@ lldb_private::formatters::LibcxxStdMapSyntheticFrontEnd:: Update(); } +llvm::Expected<uint32_t> +lldb_private::formatters::LibcxxStdMapSyntheticFrontEnd:: + CalculateNumChildrenForOldCompressedPairLayout() { + ValueObjectSP node_sp(m_tree->GetChildMemberWithName("__pair3_")); + if (!node_sp) + return 0; + + if (!isOldCompressedPairLayout(*node_sp)) + return llvm::createStringError("Unexpected std::map layout: expected " + "old __compressed_pair layout."); + + node_sp = GetFirstValueOfLibCXXCompressedPair(*node_sp); + + if (!node_sp) + return 0; + + m_count = node_sp->GetValueAsUnsigned(0); + + return m_count; +} + llvm::Expected<uint32_t> lldb_private::formatters:: LibcxxStdMapSyntheticFrontEnd::CalculateNumChildren() { if (m_count != UINT32_MAX) @@ -262,17 +285,12 @@ llvm::Expected<uint32_t> lldb_private::formatters:: if (m_tree == nullptr) return 0; - ValueObjectSP size_node(m_tree->GetChildMemberWithName("__pair3_")); - if (!size_node) - return 0; - - size_node = GetFirstValueOfLibCXXCompressedPair(*size_node); - - if (!size_node) - return 0; + if (auto node_sp = m_tree->GetChildMemberWithName("__size_")) { + m_count = node_sp->GetValueAsUnsigned(0); + return m_count; + } - m_count = size_node->GetValueAsUnsigned(0); - return m_count; + return CalculateNumChildrenForOldCompressedPairLayout(); } ValueObjectSP @@ -371,6 +389,7 @@ lldb_private::formatters::LibcxxStdMapSyntheticFrontEnd::Update() { m_tree = m_backend.GetChildMemberWithName("__tree_").get(); if (!m_tree) return lldb::ChildCacheState::eRefetch; + m_root_node = m_tree->GetChildMemberWithName("__begin_node_").get(); m_node_ptr_type = m_tree->GetCompilerType().GetDirectNestedTypeWithName("__node_pointer"); diff --git a/lldb/source/Plugins/Language/CPlusPlus/LibCxxUnorderedMap.cpp b/lldb/source/Plugins/Language/CPlusPlus/LibCxxUnorderedMap.cpp 
b/lldb/source/Plugins/Language/CPlusPlus/LibCxxUnorderedMap.cpp
index 93e7f4f..2f65c72 100644 --- a/lldb/source/Plugins/Language/CPlusPlus/LibCxxUnorderedMap.cpp +++ b/lldb/source/Plugins/Language/CPlusPlus/LibCxxUnorderedMap.cpp @@ -19,6 +19,7 @@ #include "lldb/Utility/Status.h" #include "lldb/Utility/Stream.h" #include "llvm/ADT/StringRef.h" +#include "llvm/Support/Error.h" using namespace lldb; using namespace lldb_private; @@ -44,6 +45,10 @@ public: size_t GetIndexOfChildWithName(ConstString name) override; private: + CompilerType GetNodeType(); + CompilerType GetElementType(CompilerType node_type); + llvm::Expected<size_t> CalculateNumChildrenImpl(ValueObject &table); + CompilerType m_element_type; CompilerType m_node_type; ValueObject *m_tree = nullptr; @@ -91,29 +96,53 @@ llvm::Expected<uint32_t> lldb_private::formatters:: return m_num_elements; } -static void consumeInlineNamespace(llvm::StringRef &name) { - // Delete past an inline namespace, if any: __[a-zA-Z0-9_]+:: - auto scratch = name; - if (scratch.consume_front("__") && std::isalnum(scratch[0])) { - scratch = scratch.drop_while([](char c) { return std::isalnum(c); }); - if (scratch.consume_front("::")) { - // Successfully consumed a namespace. - name = scratch; - } - } +static bool isUnorderedMap(ConstString type_name) { + return isStdTemplate(type_name, "unordered_map") || + isStdTemplate(type_name, "unordered_multimap"); } -static bool isStdTemplate(ConstString type_name, llvm::StringRef type) { - llvm::StringRef name = type_name.GetStringRef(); - // The type name may be prefixed with `std::__<inline-namespace>::`. - if (name.consume_front("std::")) - consumeInlineNamespace(name); - return name.consume_front(type) && name.starts_with("<"); +CompilerType lldb_private::formatters::LibcxxStdUnorderedMapSyntheticFrontEnd:: + GetElementType(CompilerType node_type) { + CompilerType element_type = node_type.GetTypeTemplateArgument(0); + + // This synthetic provider is used for both unordered_(multi)map and + // unordered_(multi)set. For unordered_map, the element type has an + // additional type layer, an internal struct (`__hash_value_type`) + // that wraps a std::pair. Peel away the internal wrapper type - whose + // structure is of no value to users, to expose the std::pair. This + // matches the structure returned by the std::map synthetic provider. 
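+  // For example (hypothetical instantiation): for a
+  // std::unordered_map<int, long>, element_type starts out as
+  // __hash_value_type<int, long>, whose single field is declared through a
+  // typedef of std::pair<const int, long>; resolving that typedef below is
+  // what surfaces the pair.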
+ if (isUnorderedMap(m_backend.GetTypeName())) { + std::string name; + CompilerType field_type = + element_type.GetFieldAtIndex(0, name, nullptr, nullptr, nullptr); + CompilerType actual_type = field_type.GetTypedefedType(); + if (isStdTemplate(actual_type.GetTypeName(), "pair")) + element_type = actual_type; + } + + return element_type; } -static bool isUnorderedMap(ConstString type_name) { - return isStdTemplate(type_name, "unordered_map") || - isStdTemplate(type_name, "unordered_multimap"); +CompilerType lldb_private::formatters::LibcxxStdUnorderedMapSyntheticFrontEnd:: + GetNodeType() { + auto node_sp = m_backend.GetChildAtNamePath({"__table_", "__first_node_"}); + + if (!node_sp) { + auto p1_sp = m_backend.GetChildAtNamePath({"__table_", "__p1_"}); + if (!p1_sp) + return {}; + + if (!isOldCompressedPairLayout(*p1_sp)) + return {}; + + node_sp = GetFirstValueOfLibCXXCompressedPair(*p1_sp); + if (!node_sp) + return {}; + } + + assert(node_sp); + + return node_sp->GetCompilerType().GetTypeTemplateArgument(0).GetPointeeType(); } lldb::ValueObjectSP lldb_private::formatters:: @@ -136,36 +165,12 @@ lldb::ValueObjectSP lldb_private::formatters:: ValueObjectSP hash_sp = node_sp->GetChildMemberWithName("__hash_"); if (!hash_sp || !value_sp) { if (!m_element_type) { - auto p1_sp = m_backend.GetChildAtNamePath({"__table_", "__p1_"}); - if (!p1_sp) - return nullptr; - - ValueObjectSP first_sp = GetFirstValueOfLibCXXCompressedPair(*p1_sp); - if (!first_sp) + m_node_type = GetNodeType(); + if (!m_node_type) return nullptr; - m_element_type = first_sp->GetCompilerType(); - m_element_type = m_element_type.GetTypeTemplateArgument(0); - m_element_type = m_element_type.GetPointeeType(); - m_node_type = m_element_type; - m_element_type = m_element_type.GetTypeTemplateArgument(0); - // This synthetic provider is used for both unordered_(multi)map and - // unordered_(multi)set. For unordered_map, the element type has an - // additional type layer, an internal struct (`__hash_value_type`) - // that wraps a std::pair. Peel away the internal wrapper type - whose - // structure is of no value to users, to expose the std::pair. This - // matches the structure returned by the std::map synthetic provider. 
- if (isUnorderedMap(m_backend.GetTypeName())) { - std::string name; - CompilerType field_type = m_element_type.GetFieldAtIndex( - 0, name, nullptr, nullptr, nullptr); - CompilerType actual_type = field_type.GetTypedefedType(); - if (isStdTemplate(actual_type.GetTypeName(), "pair")) - m_element_type = actual_type; - } + m_element_type = GetElementType(m_node_type); } - if (!m_node_type) - return nullptr; node_sp = m_next_element->Cast(m_node_type.GetPointerType()) ->Dereference(error); if (!node_sp || error.Fail()) @@ -217,6 +222,49 @@ lldb::ValueObjectSP lldb_private::formatters:: m_element_type); } +llvm::Expected<size_t> +lldb_private::formatters::LibcxxStdUnorderedMapSyntheticFrontEnd:: + CalculateNumChildrenImpl(ValueObject &table) { + if (auto size_sp = table.GetChildMemberWithName("__size_")) + return size_sp->GetValueAsUnsigned(0); + + ValueObjectSP p2_sp = table.GetChildMemberWithName("__p2_"); + if (!p2_sp) + return llvm::createStringError( + "Unexpected std::unordered_map layout: __p2_ member not found."); + + if (!isOldCompressedPairLayout(*p2_sp)) + return llvm::createStringError("Unexpected std::unordered_map layout: old " + "__compressed_pair layout not found."); + + ValueObjectSP num_elements_sp = GetFirstValueOfLibCXXCompressedPair(*p2_sp); + + if (!num_elements_sp) + return llvm::createStringError( + "Unexpected std::unordered_map layout: failed to retrieve first member " + "in old __compressed_pair layout."); + + return num_elements_sp->GetValueAsUnsigned(0); +} + +static ValueObjectSP GetTreePointer(ValueObject &table) { + ValueObjectSP tree_sp = table.GetChildMemberWithName("__first_node_"); + if (!tree_sp) { + ValueObjectSP p1_sp = table.GetChildMemberWithName("__p1_"); + if (!p1_sp) + return nullptr; + + if (!isOldCompressedPairLayout(*p1_sp)) + return nullptr; + + tree_sp = GetFirstValueOfLibCXXCompressedPair(*p1_sp); + if (!tree_sp) + return nullptr; + } + + return tree_sp->GetChildMemberWithName("__next_"); +} + lldb::ChildCacheState lldb_private::formatters::LibcxxStdUnorderedMapSyntheticFrontEnd::Update() { m_num_elements = 0; @@ -226,27 +274,19 @@ lldb_private::formatters::LibcxxStdUnorderedMapSyntheticFrontEnd::Update() { if (!table_sp) return lldb::ChildCacheState::eRefetch; - ValueObjectSP p2_sp = table_sp->GetChildMemberWithName("__p2_"); - if (!p2_sp) - return lldb::ChildCacheState::eRefetch; - - ValueObjectSP num_elements_sp = GetFirstValueOfLibCXXCompressedPair(*p2_sp); - if (!num_elements_sp) - return lldb::ChildCacheState::eRefetch; - - ValueObjectSP p1_sp = table_sp->GetChildMemberWithName("__p1_"); - if (!p1_sp) + ValueObjectSP tree_sp = GetTreePointer(*table_sp); + if (!tree_sp) return lldb::ChildCacheState::eRefetch; - ValueObjectSP value_sp = GetFirstValueOfLibCXXCompressedPair(*p1_sp); - if (!value_sp) - return lldb::ChildCacheState::eRefetch; + m_tree = tree_sp.get(); - m_tree = value_sp->GetChildMemberWithName("__next_").get(); - if (m_tree == nullptr) + if (auto num_elems_or_err = CalculateNumChildrenImpl(*table_sp)) + m_num_elements = *num_elems_or_err; + else { + LLDB_LOG_ERRORV(GetLog(LLDBLog::DataFormatters), + num_elems_or_err.takeError(), "{0}"); return lldb::ChildCacheState::eRefetch; - - m_num_elements = num_elements_sp->GetValueAsUnsigned(0); + } if (m_num_elements > 0) m_next_element = m_tree; diff --git a/lldb/source/Plugins/Language/CPlusPlus/LibCxxVector.cpp b/lldb/source/Plugins/Language/CPlusPlus/LibCxxVector.cpp index 461fed3..3609219 100644 --- a/lldb/source/Plugins/Language/CPlusPlus/LibCxxVector.cpp +++ 
b/lldb/source/Plugins/Language/CPlusPlus/LibCxxVector.cpp @@ -11,6 +11,8 @@ #include "lldb/Core/ValueObject.h" #include "lldb/DataFormatters/FormattersHelpers.h" #include "lldb/Utility/ConstString.h" +#include "lldb/lldb-enumerations.h" +#include "lldb/lldb-forward.h" #include <optional> using namespace lldb; @@ -116,20 +118,29 @@ lldb_private::formatters::LibcxxStdVectorSyntheticFrontEnd::GetChildAtIndex( m_element_type); } +static ValueObjectSP GetDataPointer(ValueObject &root) { + if (auto cap_sp = root.GetChildMemberWithName("__cap_")) + return cap_sp; + + ValueObjectSP cap_sp = root.GetChildMemberWithName("__end_cap_"); + if (!cap_sp) + return nullptr; + + if (!isOldCompressedPairLayout(*cap_sp)) + return nullptr; + + return GetFirstValueOfLibCXXCompressedPair(*cap_sp); +} + lldb::ChildCacheState lldb_private::formatters::LibcxxStdVectorSyntheticFrontEnd::Update() { m_start = m_finish = nullptr; - ValueObjectSP data_type_finder_sp( - m_backend.GetChildMemberWithName("__end_cap_")); - if (!data_type_finder_sp) - return lldb::ChildCacheState::eRefetch; + ValueObjectSP data_sp(GetDataPointer(m_backend)); - data_type_finder_sp = - GetFirstValueOfLibCXXCompressedPair(*data_type_finder_sp); - if (!data_type_finder_sp) + if (!data_sp) return lldb::ChildCacheState::eRefetch; - m_element_type = data_type_finder_sp->GetCompilerType().GetPointeeType(); + m_element_type = data_sp->GetCompilerType().GetPointeeType(); if (std::optional<uint64_t> size = m_element_type.GetByteSize(nullptr)) { m_element_size = *size; @@ -216,17 +227,6 @@ lldb_private::formatters::LibcxxVectorBoolSyntheticFrontEnd::GetChildAtIndex( return retval_sp; } -/*(std::__1::vector<std::__1::allocator<bool> >) vBool = { - __begin_ = 0x00000001001000e0 - __size_ = 56 - __cap_alloc_ = { - std::__1::__libcpp_compressed_pair_imp<unsigned long, - std::__1::allocator<unsigned long> > = { - __first_ = 1 - } - } - }*/ - lldb::ChildCacheState lldb_private::formatters::LibcxxVectorBoolSyntheticFrontEnd::Update() { m_children.clear(); diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp index 0f15387..41492ca 100644 --- a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp +++ b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp @@ -2685,7 +2685,7 @@ void AppleObjCRuntimeV2::WarnIfNoExpandedSharedCache() { } os << ". 
This will likely reduce debugging performance.\n"; - Debugger::ReportWarning(os.str(), debugger.GetID(), + Debugger::ReportWarning(buffer, debugger.GetID(), &m_no_expanded_cache_warning); } diff --git a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp index c367489..06da83e 100644 --- a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp +++ b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp @@ -3768,6 +3768,7 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { SymbolType type = eSymbolTypeInvalid; SectionSP symbol_section; + lldb::addr_t symbol_byte_size = 0; bool add_nlist = true; bool is_gsym = false; bool demangled_is_synthesized = false; @@ -4353,6 +4354,47 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { if (symbol_section) { const addr_t section_file_addr = symbol_section->GetFileAddress(); + if (symbol_byte_size == 0 && function_starts_count > 0) { + addr_t symbol_lookup_file_addr = nlist.n_value; + // Do an exact address match for non-ARM addresses, else get the + // closest since the symbol might be a thumb symbol which has an + // address with bit zero set. + FunctionStarts::Entry *func_start_entry = + function_starts.FindEntry(symbol_lookup_file_addr, !is_arm); + if (is_arm && func_start_entry) { + // Verify that the function start address is the symbol address + // (ARM) or the symbol address + 1 (thumb). + if (func_start_entry->addr != symbol_lookup_file_addr && + func_start_entry->addr != (symbol_lookup_file_addr + 1)) { + // Not the right entry, NULL it out... + func_start_entry = nullptr; + } + } + if (func_start_entry) { + func_start_entry->data = true; + + addr_t symbol_file_addr = func_start_entry->addr; + if (is_arm) + symbol_file_addr &= THUMB_ADDRESS_BIT_MASK; + + const FunctionStarts::Entry *next_func_start_entry = + function_starts.FindNextEntry(func_start_entry); + const addr_t section_end_file_addr = + section_file_addr + symbol_section->GetByteSize(); + if (next_func_start_entry) { + addr_t next_symbol_file_addr = next_func_start_entry->addr; + // Be sure the clear the Thumb address bit when we calculate the + // size from the current and next address + if (is_arm) + next_symbol_file_addr &= THUMB_ADDRESS_BIT_MASK; + symbol_byte_size = std::min<lldb::addr_t>( + next_symbol_file_addr - symbol_file_addr, + section_end_file_addr - symbol_file_addr); + } else { + symbol_byte_size = section_end_file_addr - symbol_file_addr; + } + } + } symbol_value -= section_file_addr; } @@ -4459,6 +4501,9 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { if (nlist.n_desc & N_WEAK_REF) sym[sym_idx].SetIsWeak(true); + if (symbol_byte_size > 0) + sym[sym_idx].SetByteSize(symbol_byte_size); + if (demangled_is_synthesized) sym[sym_idx].SetDemangledNameIsSynthesized(true); @@ -4577,7 +4622,23 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { Address symbol_addr; if (module_sp->ResolveFileAddress(symbol_file_addr, symbol_addr)) { SectionSP symbol_section(symbol_addr.GetSection()); + uint32_t symbol_byte_size = 0; if (symbol_section) { + const addr_t section_file_addr = symbol_section->GetFileAddress(); + const FunctionStarts::Entry *next_func_start_entry = + function_starts.FindNextEntry(func_start_entry); + const addr_t section_end_file_addr = + section_file_addr + symbol_section->GetByteSize(); + if (next_func_start_entry) { + addr_t next_symbol_file_addr = next_func_start_entry->addr; + if (is_arm) + next_symbol_file_addr &= THUMB_ADDRESS_BIT_MASK; + symbol_byte_size = 
std::min<lldb::addr_t>( + next_symbol_file_addr - symbol_file_addr, + section_end_file_addr - symbol_file_addr); + } else { + symbol_byte_size = section_end_file_addr - symbol_file_addr; + } sym[sym_idx].SetID(synthetic_sym_id++); // Don't set the name for any synthetic symbols, the Symbol // object will generate one if needed when the name is accessed @@ -4589,6 +4650,8 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { add_symbol_addr(symbol_addr.GetFileAddress()); if (symbol_flags) sym[sym_idx].SetFlags(symbol_flags); + if (symbol_byte_size) + sym[sym_idx].SetByteSize(symbol_byte_size); ++sym_idx; } } diff --git a/lldb/source/Plugins/Process/Windows/Common/NativeProcessWindows.cpp b/lldb/source/Plugins/Process/Windows/Common/NativeProcessWindows.cpp index 24c9aa6..9c330ff 100644 --- a/lldb/source/Plugins/Process/Windows/Common/NativeProcessWindows.cpp +++ b/lldb/source/Plugins/Process/Windows/Common/NativeProcessWindows.cpp @@ -548,7 +548,7 @@ NativeProcessWindows::OnDebugException(bool first_chance, << " encountered at address " << llvm::format_hex(record.GetExceptionAddress(), 8); StopThread(record.GetThreadID(), StopReason::eStopReasonException, - desc_stream.str().c_str()); + desc.c_str()); SetState(eStateStopped, true); } diff --git a/lldb/source/Plugins/Process/Windows/Common/ProcessWindows.cpp b/lldb/source/Plugins/Process/Windows/Common/ProcessWindows.cpp index b25068d..703aa08 100644 --- a/lldb/source/Plugins/Process/Windows/Common/ProcessWindows.cpp +++ b/lldb/source/Plugins/Process/Windows/Common/ProcessWindows.cpp @@ -492,10 +492,10 @@ void ProcessWindows::RefreshStateAfterStop() { << llvm::format_hex(active_exception->GetExceptionAddress(), 8); DumpAdditionalExceptionInformation(desc_stream, active_exception); - stop_info = StopInfo::CreateStopReasonWithException( - *stop_thread, desc_stream.str().c_str()); + stop_info = + StopInfo::CreateStopReasonWithException(*stop_thread, desc.c_str()); stop_thread->SetStopInfo(stop_info); - LLDB_LOG(log, "{0}", desc_stream.str()); + LLDB_LOG(log, "{0}", desc); return; } } diff --git a/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.cpp b/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.cpp index 23baa92..d005cf1 100644 --- a/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.cpp +++ b/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.cpp @@ -3668,7 +3668,6 @@ GDBRemoteCommunicationClient::SendTraceStop(const TraceStopRequest &request, std::string json_string; llvm::raw_string_ostream os(json_string); os << toJSON(request); - os.flush(); escaped_packet.PutEscapedBytes(json_string.c_str(), json_string.size()); @@ -3738,7 +3737,6 @@ GDBRemoteCommunicationClient::SendTraceGetState(llvm::StringRef type, std::string json_string; llvm::raw_string_ostream os(json_string); os << toJSON(TraceGetStateRequest{type.str()}); - os.flush(); escaped_packet.PutEscapedBytes(json_string.c_str(), json_string.size()); @@ -3772,7 +3770,6 @@ GDBRemoteCommunicationClient::SendTraceGetBinaryData( std::string json_string; llvm::raw_string_ostream os(json_string); os << toJSON(request); - os.flush(); escaped_packet.PutEscapedBytes(json_string.c_str(), json_string.size()); @@ -4045,7 +4042,7 @@ GDBRemoteCommunicationClient::ReadExtFeature(llvm::StringRef object, } } - return output_stream.str(); + return output; } // Notify the target that gdb is prepared to serve symbol lookup requests. 
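A recurring cleanup across these hunks replaces llvm::raw_string_ostream::str() and flush() calls with direct uses of the backing std::string. A minimal sketch of why that is behavior-preserving, assuming current LLVM semantics (raw_string_ostream is always unbuffered and writes through to the string it wraps; the names below are illustrative, not from the patch):

    #include "llvm/Support/raw_ostream.h"
    #include <cassert>
    #include <string>

    int main() {
      std::string buffer;
      llvm::raw_string_ostream os(buffer);
      os << "pid " << 42;
      // No flush() needed: the stream appends to `buffer` as it goes,
      // and str() merely returns a reference to that same string.
      assert(buffer == "pid 42");
      return 0;
    }

Since os.str() and buffer name the same bytes, passing the string directly is a pure simplification.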
diff --git a/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp b/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp index 271ff61..d5dfe79 100644 --- a/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp +++ b/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp @@ -5360,7 +5360,7 @@ std::string ProcessGDBRemote::HarmonizeThreadIdsForProfileData( output_stream << end_delimiter; m_thread_id_to_used_usec_map = new_thread_id_to_used_usec_map; - return output_stream.str(); + return output; } void ProcessGDBRemote::HandleStopReply() { diff --git a/lldb/source/Plugins/Process/minidump/ProcessMinidump.cpp b/lldb/source/Plugins/Process/minidump/ProcessMinidump.cpp index 42cc9f0..32ffba7 100644 --- a/lldb/source/Plugins/Process/minidump/ProcessMinidump.cpp +++ b/lldb/source/Plugins/Process/minidump/ProcessMinidump.cpp @@ -289,8 +289,8 @@ void ProcessMinidump::RefreshStateAfterStop() { << " encountered at address " << llvm::format_hex( exception_stream.ExceptionRecord.ExceptionAddress, 8); - stop_info = StopInfo::CreateStopReasonWithException( - *stop_thread, desc_stream.str().c_str()); + stop_info = + StopInfo::CreateStopReasonWithException(*stop_thread, desc.c_str()); } stop_thread->SetStopInfo(stop_info); diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp index 32d8a92..373b3d9 100644 --- a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp +++ b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp @@ -501,5 +501,5 @@ void DebugNamesDWARFIndex::Dump(Stream &s) { std::string data; llvm::raw_string_ostream os(data); m_debug_names_up->dump(os); - s.PutCString(os.str()); + s.PutCString(data); } diff --git a/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp b/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp index d581d377..887983d 100644 --- a/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp +++ b/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp @@ -697,7 +697,7 @@ std::string ManualDWARFIndex::GetCacheKey() { ObjectFile *objfile = m_dwarf->GetObjectFile(); strm << objfile->GetModule()->GetCacheKey() << "-dwarf-index-" << llvm::format_hex(objfile->GetCacheHash(), 10); - return strm.str(); + return key; } bool ManualDWARFIndex::LoadFromCache() { diff --git a/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleLoader.cpp b/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleLoader.cpp index 1a9f6fe..c00eebc 100644 --- a/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleLoader.cpp +++ b/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleLoader.cpp @@ -72,7 +72,7 @@ Error TraceIntelPTBundleLoader::CreateJSONError(json::Path::Root &root, root.printErrorContext(value, os); return createStringError( std::errc::invalid_argument, "%s\n\nContext:\n%s\n\nSchema:\n%s", - toString(root.getError()).c_str(), os.str().c_str(), GetSchema().data()); + toString(root.getError()).c_str(), err.c_str(), GetSchema().data()); } ThreadPostMortemTraceSP diff --git a/lldb/source/Symbol/Symtab.cpp b/lldb/source/Symbol/Symtab.cpp index 5b5bf5c..3c5075d 100644 --- a/lldb/source/Symbol/Symtab.cpp +++ b/lldb/source/Symbol/Symtab.cpp @@ -1179,7 +1179,7 @@ std::string Symtab::GetCacheKey() { // another object file in a separate symbol file. 
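// The resulting key therefore has the form
// <module-cache-key>-symtab-0x<cache-hash>.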
strm << m_objfile->GetModule()->GetCacheKey() << "-symtab-" << llvm::format_hex(m_objfile->GetCacheHash(), 10); - return strm.str(); + return key; } void Symtab::SaveToCache() { diff --git a/lldb/source/Target/Statistics.cpp b/lldb/source/Target/Statistics.cpp index d619f92..ae2f65e 100644 --- a/lldb/source/Target/Statistics.cpp +++ b/lldb/source/Target/Statistics.cpp @@ -414,7 +414,7 @@ llvm::json::Value DebuggerStats::ReportStatistics( llvm::raw_string_ostream ss(buffer); json::OStream json_os(ss); transcript.Serialize(json_os); - if (auto json_transcript = llvm::json::parse(ss.str())) + if (auto json_transcript = llvm::json::parse(buffer)) global_stats.try_emplace("transcript", std::move(json_transcript.get())); } diff --git a/lldb/source/Target/Target.cpp b/lldb/source/Target/Target.cpp index 3e7e7b7..f1659aa 100644 --- a/lldb/source/Target/Target.cpp +++ b/lldb/source/Target/Target.cpp @@ -4603,7 +4603,7 @@ void TargetProperties::CheckJITObjectsDir() { std::optional<lldb::user_id_t> debugger_id; if (m_target) debugger_id = m_target->GetDebugger().GetID(); - Debugger::ReportError(os.str(), debugger_id); + Debugger::ReportError(buffer, debugger_id); } bool TargetProperties::GetEnableSyntheticValue() const { diff --git a/lldb/source/Utility/LLDBAssert.cpp b/lldb/source/Utility/LLDBAssert.cpp index 4ecd604..b0c39a2 100644 --- a/lldb/source/Utility/LLDBAssert.cpp +++ b/lldb/source/Utility/LLDBAssert.cpp @@ -54,7 +54,7 @@ void lldb_assert(bool expression, const char *expr_text, const char *func, llvm::formatv("Assertion failed: ({0}), function {1}, file {2}, line {3}", expr_text, func, file, line) .str(), - backtrace.str(), + buffer, "Please file a bug report against lldb reporting this failure log, and " "as many details as possible"); } diff --git a/lldb/source/Utility/Log.cpp b/lldb/source/Utility/Log.cpp index 6713a5b..f6b1381 100644 --- a/lldb/source/Utility/Log.cpp +++ b/lldb/source/Utility/Log.cpp @@ -374,7 +374,7 @@ void Log::Format(llvm::StringRef file, llvm::StringRef function, llvm::raw_string_ostream message(message_string); WriteHeader(message, file, function); message << payload << "\n"; - WriteMessage(message.str()); + WriteMessage(message_string); } StreamLogHandler::StreamLogHandler(int fd, bool should_close, diff --git a/lldb/test/API/benchmarks/continue/TestBenchmarkContinue.py b/lldb/test/API/benchmarks/continue/TestBenchmarkContinue.py deleted file mode 100644 index f2f15b3..0000000 --- a/lldb/test/API/benchmarks/continue/TestBenchmarkContinue.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -Test lldb data formatter subsystem. -""" - -import lldb -from lldbsuite.test.decorators import * -from lldbsuite.test.lldbbench import * -from lldbsuite.test.lldbtest import * -from lldbsuite.test import lldbutil - - -class TestBenchmarkContinue(BenchBase): - @benchmarks_test - def test_run_command(self): - """Benchmark different ways to continue a process""" - self.build() - self.data_formatter_commands() - - def setUp(self): - # Call super's setUp(). - BenchBase.setUp(self) - - def data_formatter_commands(self): - """Benchmark different ways to continue a process""" - self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET) - - bkpt = self.target().FindBreakpointByID( - lldbutil.run_break_set_by_source_regexp(self, "// break here") - ) - - self.runCmd("run", RUN_SUCCEEDED) - - # The stop reason of the thread should be breakpoint. 
- self.expect( - "thread list", - STOPPED_DUE_TO_BREAKPOINT, - substrs=["stopped", "stop reason = breakpoint"], - ) - - # This is the function to remove the custom formats in order to have a - # clean slate for the next test case. - def cleanup(): - self.runCmd("type format clear", check=False) - self.runCmd("type summary clear", check=False) - self.runCmd("type filter clear", check=False) - self.runCmd("type synth clear", check=False) - self.runCmd("settings set target.max-children-count 256", check=False) - - # Execute the cleanup function during test case tear down. - self.addTearDownHook(cleanup) - - runCmd_sw = Stopwatch() - lldbutil_sw = Stopwatch() - - for i in range(0, 15): - runCmd_sw.start() - self.runCmd("continue") - runCmd_sw.stop() - - for i in range(0, 15): - lldbutil_sw.start() - lldbutil.continue_to_breakpoint(self.process(), bkpt) - lldbutil_sw.stop() - - print("runCmd: %s\nlldbutil: %s" % (runCmd_sw, lldbutil_sw)) diff --git a/lldb/test/API/benchmarks/continue/main.cpp b/lldb/test/API/benchmarks/continue/main.cpp deleted file mode 100644 index d715a11..0000000 --- a/lldb/test/API/benchmarks/continue/main.cpp +++ /dev/null @@ -1,36 +0,0 @@ -#include <map> - -#define intint_map std::map<int, int> - -int g_the_foo = 0; - -int thefoo_rw(int arg = 1) -{ - if (arg < 0) - arg = 0; - if (!arg) - arg = 1; - g_the_foo += arg; - return g_the_foo; -} - -int main() -{ - intint_map ii; - - for (int i = 0; i < 15; i++) - { - ii[i] = i + 1; - thefoo_rw(i); // break here - } - - ii.clear(); - - for (int j = 0; j < 15; j++) - { - ii[j] = j + 1; - thefoo_rw(j); // break here - } - - return 0; -} diff --git a/lldb/test/API/benchmarks/expression/Makefile b/lldb/test/API/benchmarks/expression/Makefile deleted file mode 100644 index 99998b2..0000000 --- a/lldb/test/API/benchmarks/expression/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -CXX_SOURCES := main.cpp - -include Makefile.rules diff --git a/lldb/test/API/benchmarks/expression/TestExpressionCmd.py b/lldb/test/API/benchmarks/expression/TestExpressionCmd.py deleted file mode 100644 index 8261b1b..0000000 --- a/lldb/test/API/benchmarks/expression/TestExpressionCmd.py +++ /dev/null @@ -1,74 +0,0 @@ -"""Test lldb's expression evaluations and collect statistics.""" - -import sys -import lldb -from lldbsuite.test.decorators import * -from lldbsuite.test.lldbbench import * -from lldbsuite.test.lldbtest import * -from lldbsuite.test import configuration -from lldbsuite.test import lldbutil - - -class ExpressionEvaluationCase(BenchBase): - def setUp(self): - BenchBase.setUp(self) - self.source = "main.cpp" - self.line_to_break = line_number(self.source, "// Set breakpoint here.") - self.count = 25 - - @benchmarks_test - @add_test_categories(["pexpect"]) - def test_expr_cmd(self): - """Test lldb's expression commands and collect statistics.""" - self.build() - self.exe_name = "a.out" - - print() - self.run_lldb_repeated_exprs(self.exe_name, self.count) - print("lldb expr cmd benchmark:", self.stopwatch) - - def run_lldb_repeated_exprs(self, exe_name, count): - import pexpect - - exe = self.getBuildArtifact(exe_name) - - # Set self.child_prompt, which is "(lldb) ". - self.child_prompt = "(lldb) " - prompt = self.child_prompt - - # Reset the stopwatch now. - self.stopwatch.reset() - for i in range(count): - # So that the child gets torn down after the test. - self.child = pexpect.spawn( - "%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe) - ) - child = self.child - - # Turn on logging for what the child sends back. 
- if self.TraceOn(): - child.logfile_read = sys.stdout - - child.expect_exact(prompt) - child.sendline( - "breakpoint set -f %s -l %d" % (self.source, self.line_to_break) - ) - child.expect_exact(prompt) - child.sendline("run") - child.expect_exact(prompt) - expr_cmd1 = "expr ptr[j]->point.x" - expr_cmd2 = "expr ptr[j]->point.y" - - with self.stopwatch: - child.sendline(expr_cmd1) - child.expect_exact(prompt) - child.sendline(expr_cmd2) - child.expect_exact(prompt) - - child.sendline("quit") - try: - self.child.expect(pexpect.EOF) - except: - pass - - self.child = None diff --git a/lldb/test/API/benchmarks/expression/TestRepeatedExprs.py b/lldb/test/API/benchmarks/expression/TestRepeatedExprs.py deleted file mode 100644 index acc6b74..0000000 --- a/lldb/test/API/benchmarks/expression/TestRepeatedExprs.py +++ /dev/null @@ -1,131 +0,0 @@ -"""Test evaluating expressions repeatedly comparing lldb against gdb.""" - -import sys -import lldb -from lldbsuite.test.lldbbench import BenchBase -from lldbsuite.test.decorators import * -from lldbsuite.test.lldbtest import * -from lldbsuite.test import configuration -from lldbsuite.test import lldbutil - - -class RepeatedExprsCase(BenchBase): - def setUp(self): - BenchBase.setUp(self) - self.source = "main.cpp" - self.line_to_break = line_number(self.source, "// Set breakpoint here.") - self.lldb_avg = None - self.gdb_avg = None - self.count = 100 - - @benchmarks_test - @add_test_categories(["pexpect"]) - def test_compare_lldb_to_gdb(self): - """Test repeated expressions with lldb vs. gdb.""" - self.build() - self.exe_name = "a.out" - - print() - self.run_lldb_repeated_exprs(self.exe_name, self.count) - print("lldb benchmark:", self.stopwatch) - self.run_gdb_repeated_exprs(self.exe_name, self.count) - print("gdb benchmark:", self.stopwatch) - print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg)) - - def run_lldb_repeated_exprs(self, exe_name, count): - import pexpect - - exe = self.getBuildArtifact(exe_name) - - # Set self.child_prompt, which is "(lldb) ". - self.child_prompt = "(lldb) " - prompt = self.child_prompt - - # So that the child gets torn down after the test. - self.child = pexpect.spawn( - "%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe) - ) - child = self.child - - # Turn on logging for what the child sends back. - if self.TraceOn(): - child.logfile_read = sys.stdout - - child.expect_exact(prompt) - child.sendline("breakpoint set -f %s -l %d" % (self.source, self.line_to_break)) - child.expect_exact(prompt) - child.sendline("run") - child.expect_exact(prompt) - expr_cmd1 = "expr ptr[j]->point.x" - expr_cmd2 = "expr ptr[j]->point.y" - - # Reset the stopwatch now. - self.stopwatch.reset() - for i in range(count): - with self.stopwatch: - child.sendline(expr_cmd1) - child.expect_exact(prompt) - child.sendline(expr_cmd2) - child.expect_exact(prompt) - child.sendline("process continue") - child.expect_exact(prompt) - - child.sendline("quit") - try: - self.child.expect(pexpect.EOF) - except: - pass - - self.lldb_avg = self.stopwatch.avg() - if self.TraceOn(): - print("lldb expression benchmark:", str(self.stopwatch)) - self.child = None - - def run_gdb_repeated_exprs(self, exe_name, count): - import pexpect - - exe = self.getBuildArtifact(exe_name) - - # Set self.child_prompt, which is "(gdb) ". - self.child_prompt = "(gdb) " - prompt = self.child_prompt - - # So that the child gets torn down after the test. 
- self.child = pexpect.spawn("gdb --nx %s" % exe) - child = self.child - - # Turn on logging for what the child sends back. - if self.TraceOn(): - child.logfile_read = sys.stdout - - child.expect_exact(prompt) - child.sendline("break %s:%d" % (self.source, self.line_to_break)) - child.expect_exact(prompt) - child.sendline("run") - child.expect_exact(prompt) - expr_cmd1 = "print ptr[j]->point.x" - expr_cmd2 = "print ptr[j]->point.y" - - # Reset the stopwatch now. - self.stopwatch.reset() - for i in range(count): - with self.stopwatch: - child.sendline(expr_cmd1) - child.expect_exact(prompt) - child.sendline(expr_cmd2) - child.expect_exact(prompt) - child.sendline("continue") - child.expect_exact(prompt) - - child.sendline("quit") - child.expect_exact("The program is running. Exit anyway?") - child.sendline("y") - try: - self.child.expect(pexpect.EOF) - except: - pass - - self.gdb_avg = self.stopwatch.avg() - if self.TraceOn(): - print("gdb expression benchmark:", str(self.stopwatch)) - self.child = None diff --git a/lldb/test/API/benchmarks/expression/main.cpp b/lldb/test/API/benchmarks/expression/main.cpp deleted file mode 100644 index 1a095d3..0000000 --- a/lldb/test/API/benchmarks/expression/main.cpp +++ /dev/null @@ -1,43 +0,0 @@ -#include <stdio.h> - -class Point { -public: - int x; - int y; - Point(int a, int b): - x(a), - y(b) - {} -}; - -class Data { -public: - int id; - Point point; - Data(int i): - id(i), - point(0, 0) - {} -}; - -int main(int argc, char const *argv[]) { - Data *data[1000]; - Data **ptr = data; - for (int i = 0; i < 1000; ++i) { - ptr[i] = new Data(i); - ptr[i]->point.x = i; - ptr[i]->point.y = i+1; - } - - printf("Finished populating data.\n"); - for (int j = 0; j < 1000; ++j) { - bool dump = argc > 1; // Set breakpoint here. - // Evaluate a couple of expressions (2*1000 = 2000 exprs): - // expr ptr[j]->point.x - // expr ptr[j]->point.y - if (dump) { - printf("data[%d] = %d (%d, %d)\n", j, ptr[j]->id, ptr[j]->point.x, ptr[j]->point.y); - } - } - return 0; -} diff --git a/lldb/test/API/benchmarks/frame_variable/TestFrameVariableResponse.py b/lldb/test/API/benchmarks/frame_variable/TestFrameVariableResponse.py deleted file mode 100644 index e364fb8..0000000 --- a/lldb/test/API/benchmarks/frame_variable/TestFrameVariableResponse.py +++ /dev/null @@ -1,68 +0,0 @@ -"""Test lldb's response time for 'frame variable' command.""" - -import sys -import lldb -from lldbsuite.test import configuration -from lldbsuite.test import lldbtest_config -from lldbsuite.test.decorators import * -from lldbsuite.test.lldbbench import * - - -class FrameVariableResponseBench(BenchBase): - def setUp(self): - BenchBase.setUp(self) - self.exe = lldbtest_config.lldbExec - self.break_spec = "-n main" - self.count = 20 - - @benchmarks_test - @no_debug_info_test - @add_test_categories(["pexpect"]) - def test_startup_delay(self): - """Test response time for the 'frame variable' command.""" - print() - self.run_frame_variable_bench(self.exe, self.break_spec, self.count) - print("lldb frame variable benchmark:", self.stopwatch) - - def run_frame_variable_bench(self, exe, break_spec, count): - import pexpect - - # Set self.child_prompt, which is "(lldb) ". - self.child_prompt = "(lldb) " - prompt = self.child_prompt - - # Reset the stopwatchs now. - self.stopwatch.reset() - for i in range(count): - # So that the child gets torn down after the test. 
- self.child = pexpect.spawn( - "%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe) - ) - child = self.child - - # Turn on logging for what the child sends back. - if self.TraceOn(): - child.logfile_read = sys.stdout - - # Set our breakpoint. - child.sendline("breakpoint set %s" % break_spec) - child.expect_exact(prompt) - - # Run the target and expect it to be stopped due to breakpoint. - child.sendline("run") # Aka 'process launch'. - child.expect_exact(prompt) - - with self.stopwatch: - # Measure the 'frame variable' response time. - child.sendline("frame variable") - child.expect_exact(prompt) - - child.sendline("quit") - try: - self.child.expect(pexpect.EOF) - except: - pass - - # The test is about to end and if we come to here, the child process has - # been terminated. Mark it so. - self.child = None diff --git a/lldb/test/API/benchmarks/libcxxlist/Makefile b/lldb/test/API/benchmarks/libcxxlist/Makefile deleted file mode 100644 index 99998b2..0000000 --- a/lldb/test/API/benchmarks/libcxxlist/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -CXX_SOURCES := main.cpp - -include Makefile.rules diff --git a/lldb/test/API/benchmarks/libcxxlist/TestBenchmarkLibcxxList.py b/lldb/test/API/benchmarks/libcxxlist/TestBenchmarkLibcxxList.py deleted file mode 100644 index 01de980..0000000 --- a/lldb/test/API/benchmarks/libcxxlist/TestBenchmarkLibcxxList.py +++ /dev/null @@ -1,58 +0,0 @@ -""" -Test lldb data formatter subsystem. -""" - -import lldb -from lldbsuite.test.decorators import * -from lldbsuite.test.lldbbench import * -from lldbsuite.test.lldbtest import * -from lldbsuite.test import lldbutil - - -class TestBenchmarkLibcxxList(BenchBase): - @benchmarks_test - def test_run_command(self): - """Benchmark the std::list data formatter (libc++)""" - self.build() - self.data_formatter_commands() - - def setUp(self): - # Call super's setUp(). - BenchBase.setUp(self) - - def data_formatter_commands(self): - """Benchmark the std::list data formatter (libc++)""" - self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET) - - bkpt = self.target().FindBreakpointByID( - lldbutil.run_break_set_by_source_regexp(self, "break here") - ) - - self.runCmd("run", RUN_SUCCEEDED) - - # The stop reason of the thread should be breakpoint. - self.expect( - "thread list", - STOPPED_DUE_TO_BREAKPOINT, - substrs=["stopped", "stop reason = breakpoint"], - ) - - # This is the function to remove the custom formats in order to have a - # clean slate for the next test case. - def cleanup(): - self.runCmd("type format clear", check=False) - self.runCmd("type summary clear", check=False) - self.runCmd("type filter clear", check=False) - self.runCmd("type synth clear", check=False) - self.runCmd("settings set target.max-children-count 256", check=False) - - # Execute the cleanup function during test case tear down. 
- self.addTearDownHook(cleanup) - - sw = Stopwatch() - - sw.start() - self.expect("frame variable -A list", substrs=["[300]", "300"]) - sw.stop() - - print("time to print: %s" % (sw)) diff --git a/lldb/test/API/benchmarks/libcxxlist/main.cpp b/lldb/test/API/benchmarks/libcxxlist/main.cpp deleted file mode 100644 index 9c4113a..0000000 --- a/lldb/test/API/benchmarks/libcxxlist/main.cpp +++ /dev/null @@ -1,11 +0,0 @@ -#include <list> - -int main() -{ - std::list<int> list; - for (int i = 0; - i < 1500; - i++) - list.push_back(i); - return list.size(); // break here -} diff --git a/lldb/test/API/benchmarks/libcxxmap/Makefile b/lldb/test/API/benchmarks/libcxxmap/Makefile deleted file mode 100644 index 99998b2..0000000 --- a/lldb/test/API/benchmarks/libcxxmap/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -CXX_SOURCES := main.cpp - -include Makefile.rules diff --git a/lldb/test/API/benchmarks/libcxxmap/TestBenchmarkLibcxxMap.py b/lldb/test/API/benchmarks/libcxxmap/TestBenchmarkLibcxxMap.py deleted file mode 100644 index 10056ab..0000000 --- a/lldb/test/API/benchmarks/libcxxmap/TestBenchmarkLibcxxMap.py +++ /dev/null @@ -1,58 +0,0 @@ -""" -Test lldb data formatter subsystem. -""" - -import lldb -from lldbsuite.test.lldbbench import * -from lldbsuite.test.decorators import * -from lldbsuite.test.lldbtest import * -from lldbsuite.test import lldbutil - - -class TestBenchmarkLibcxxMap(BenchBase): - @benchmarks_test - def test_run_command(self): - """Benchmark the std::map data formatter (libc++)""" - self.build() - self.data_formatter_commands() - - def setUp(self): - # Call super's setUp(). - BenchBase.setUp(self) - - def data_formatter_commands(self): - """Benchmark the std::map data formatter (libc++)""" - self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET) - - bkpt = self.target().FindBreakpointByID( - lldbutil.run_break_set_by_source_regexp(self, "break here") - ) - - self.runCmd("run", RUN_SUCCEEDED) - - # The stop reason of the thread should be breakpoint. - self.expect( - "thread list", - STOPPED_DUE_TO_BREAKPOINT, - substrs=["stopped", "stop reason = breakpoint"], - ) - - # This is the function to remove the custom formats in order to have a - # clean slate for the next test case. - def cleanup(): - self.runCmd("type format clear", check=False) - self.runCmd("type summary clear", check=False) - self.runCmd("type filter clear", check=False) - self.runCmd("type synth clear", check=False) - self.runCmd("settings set target.max-children-count 256", check=False) - - # Execute the cleanup function during test case tear down. 
- self.addTearDownHook(cleanup) - - sw = Stopwatch() - - sw.start() - self.expect("frame variable -A map", substrs=["[300]", "300"]) - sw.stop() - - print("time to print: %s" % (sw)) diff --git a/lldb/test/API/benchmarks/libcxxmap/main.cpp b/lldb/test/API/benchmarks/libcxxmap/main.cpp deleted file mode 100644 index 45efb26..0000000 --- a/lldb/test/API/benchmarks/libcxxmap/main.cpp +++ /dev/null @@ -1,11 +0,0 @@ -#include <map> - -int main() -{ - std::map<int, int> map; - for (int i = 0; - i < 1500; - i++) - map[i] = i; - return map.size(); // break here -} diff --git a/lldb/test/API/benchmarks/startup/TestStartupDelays.py b/lldb/test/API/benchmarks/startup/TestStartupDelays.py deleted file mode 100644 index faec21e..0000000 --- a/lldb/test/API/benchmarks/startup/TestStartupDelays.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Test lldb's startup delays creating a target, setting a breakpoint, and run to breakpoint stop.""" - -import sys -import lldb -from lldbsuite.test import configuration -from lldbsuite.test import lldbtest_config -from lldbsuite.test.decorators import * -from lldbsuite.test.lldbbench import * - - -class StartupDelaysBench(BenchBase): - def setUp(self): - BenchBase.setUp(self) - # Create self.stopwatch2 for measuring "set first breakpoint". - # The default self.stopwatch is for "create fresh target". - self.stopwatch2 = Stopwatch() - # Create self.stopwatch3 for measuring "run to breakpoint". - self.stopwatch3 = Stopwatch() - self.exe = lldbtest_config.lldbExec - self.break_spec = "-n main" - self.count = 30 - - @benchmarks_test - @no_debug_info_test - @add_test_categories(["pexpect"]) - def test_startup_delay(self): - """Test start up delays creating a target, setting a breakpoint, and run to breakpoint stop.""" - print() - self.run_startup_delays_bench(self.exe, self.break_spec, self.count) - print("lldb startup delay (create fresh target) benchmark:", self.stopwatch) - print("lldb startup delay (set first breakpoint) benchmark:", self.stopwatch2) - print("lldb startup delay (run to breakpoint) benchmark:", self.stopwatch3) - - def run_startup_delays_bench(self, exe, break_spec, count): - import pexpect - - # Set self.child_prompt, which is "(lldb) ". - self.child_prompt = "(lldb) " - prompt = self.child_prompt - - # Reset the stopwatchs now. - self.stopwatch.reset() - self.stopwatch2.reset() - for i in range(count): - # So that the child gets torn down after the test. - self.child = pexpect.spawn( - "%s %s" % (lldbtest_config.lldbExec, self.lldbOption) - ) - child = self.child - - # Turn on logging for what the child sends back. - if self.TraceOn(): - child.logfile_read = sys.stdout - - with self.stopwatch: - # Create a fresh target. - child.sendline("file %s" % exe) # Aka 'target create'. - child.expect_exact(prompt) - - with self.stopwatch2: - # Read debug info and set the first breakpoint. - child.sendline("breakpoint set %s" % break_spec) - child.expect_exact(prompt) - - with self.stopwatch3: - # Run to the breakpoint just set. - child.sendline("run") - child.expect_exact(prompt) - - child.sendline("quit") - try: - self.child.expect(pexpect.EOF) - except: - pass - - # The test is about to end and if we come to here, the child process has - # been terminated. Mark it so. 
- self.child = None diff --git a/lldb/test/API/benchmarks/stepping/TestSteppingSpeed.py b/lldb/test/API/benchmarks/stepping/TestSteppingSpeed.py deleted file mode 100644 index d0f9b0d61..0000000 --- a/lldb/test/API/benchmarks/stepping/TestSteppingSpeed.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Test lldb's stepping speed.""" - -import sys -import lldb -from lldbsuite.test import configuration -from lldbsuite.test import lldbtest_config -from lldbsuite.test.lldbbench import * -from lldbsuite.test.decorators import * -from lldbsuite.test.lldbtest import * -from lldbsuite.test import lldbutil - - -class SteppingSpeedBench(BenchBase): - def setUp(self): - BenchBase.setUp(self) - self.exe = lldbtest_config.lldbExec - self.break_spec = "-n main" - self.count = 50 - - self.trace("self.exe=%s" % self.exe) - self.trace("self.break_spec=%s" % self.break_spec) - - @benchmarks_test - @no_debug_info_test - @add_test_categories(["pexpect"]) - def test_run_lldb_steppings(self): - """Test lldb steppings on a large executable.""" - print() - self.run_lldb_steppings(self.exe, self.break_spec, self.count) - print("lldb stepping benchmark:", self.stopwatch) - - def run_lldb_steppings(self, exe, break_spec, count): - import pexpect - - # Set self.child_prompt, which is "(lldb) ". - self.child_prompt = "(lldb) " - prompt = self.child_prompt - - # So that the child gets torn down after the test. - self.child = pexpect.spawn( - "%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe) - ) - child = self.child - - # Turn on logging for what the child sends back. - if self.TraceOn(): - child.logfile_read = sys.stdout - - child.expect_exact(prompt) - child.sendline("breakpoint set %s" % break_spec) - child.expect_exact(prompt) - child.sendline("run") - child.expect_exact(prompt) - - # Reset the stopwatch now. - self.stopwatch.reset() - for i in range(count): - with self.stopwatch: - # Disassemble the function. - child.sendline("next") # Aka 'thread step-over'. - child.expect_exact(prompt) - - child.sendline("quit") - try: - self.child.expect(pexpect.EOF) - except: - pass - - self.child = None diff --git a/lldb/test/API/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py b/lldb/test/API/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py deleted file mode 100644 index 91527cd..0000000 --- a/lldb/test/API/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py +++ /dev/null @@ -1,122 +0,0 @@ -"""Benchmark the turnaround time starting a debugger and run to the breakpoint with lldb vs. gdb.""" - -import sys -import lldb -from lldbsuite.test.lldbbench import * -from lldbsuite.test.decorators import * -from lldbsuite.test.lldbtest import * -from lldbsuite.test import configuration -from lldbsuite.test import lldbutil - - -class CompileRunToBreakpointBench(BenchBase): - def setUp(self): - BenchBase.setUp(self) - self.exe = lldbtest_config.lldbExec - self.function = "Driver::MainLoop()" - self.count = 3 - - self.lldb_avg = None - self.gdb_avg = None - - @benchmarks_test - @no_debug_info_test - @add_test_categories(["pexpect"]) - def test_run_lldb_then_gdb(self): - """Benchmark turnaround time with lldb vs. 
gdb.""" - print() - self.run_lldb_turnaround(self.exe, self.function, self.count) - print("lldb turnaround benchmark:", self.stopwatch) - self.run_gdb_turnaround(self.exe, self.function, self.count) - print("gdb turnaround benchmark:", self.stopwatch) - print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg)) - - def run_lldb_turnaround(self, exe, function, count): - import pexpect - - def run_one_round(): - prompt = self.child_prompt - - # So that the child gets torn down after the test. - self.child = pexpect.spawn( - "%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe) - ) - child = self.child - - # Turn on logging for what the child sends back. - if self.TraceOn(): - child.logfile_read = sys.stdout - - child.expect_exact(prompt) - child.sendline("breakpoint set -F %s" % function) - child.expect_exact(prompt) - child.sendline("run") - child.expect_exact(prompt) - - # Set self.child_prompt, which is "(lldb) ". - self.child_prompt = "(lldb) " - # Reset the stopwatch now. - self.stopwatch.reset() - - for i in range(count + 1): - # Ignore the first invoke lldb and run to the breakpoint turnaround - # time. - if i == 0: - run_one_round() - else: - with self.stopwatch: - run_one_round() - - self.child.sendline("quit") - try: - self.child.expect(pexpect.EOF) - except: - pass - - self.lldb_avg = self.stopwatch.avg() - self.child = None - - def run_gdb_turnaround(self, exe, function, count): - import pexpect - - def run_one_round(): - prompt = self.child_prompt - - # So that the child gets torn down after the test. - self.child = pexpect.spawn("gdb --nx %s" % exe) - child = self.child - - # Turn on logging for what the child sends back. - if self.TraceOn(): - child.logfile_read = sys.stdout - - child.expect_exact(prompt) - child.sendline("break %s" % function) - child.expect_exact(prompt) - child.sendline("run") - child.expect_exact(prompt) - - # Set self.child_prompt, which is "(gdb) ". - self.child_prompt = "(gdb) " - # Reset the stopwatch now. - self.stopwatch.reset() - - for i in range(count + 1): - # Ignore the first invoke lldb and run to the breakpoint turnaround - # time. - if i == 0: - run_one_round() - else: - with self.stopwatch: - run_one_round() - - self.child.sendline("quit") - self.child.expect_exact("The program is running. 
Exit anyway?") - self.child.sendline("y") - try: - self.child.expect(pexpect.EOF) - except: - pass - - self.gdb_avg = self.stopwatch.avg() - self.child = None diff --git a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/list/TestDataFormatterGenericList.py b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/list/TestDataFormatterGenericList.py index 270aab1..8546b1f 100644 --- a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/list/TestDataFormatterGenericList.py +++ b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/list/TestDataFormatterGenericList.py @@ -60,9 +60,7 @@ class GenericListDataFormatterTestCase(TestBase): self.runCmd("type format add -f hex int") self.expect( - "frame variable numbers_list --raw", - matching=False, - substrs=["size=0", "{}"], + "frame variable numbers_list --raw", matching=False, substrs=["size=0"] ) if stdlib_type == USE_LIBSTDCPP: diff --git a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/string/TestDataFormatterLibcxxStringSimulator.py b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/string/TestDataFormatterLibcxxStringSimulator.py index 98d2c73..afe6374 100644 --- a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/string/TestDataFormatterLibcxxStringSimulator.py +++ b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/string/TestDataFormatterLibcxxStringSimulator.py @@ -28,12 +28,13 @@ class LibcxxStringDataFormatterSimulatorTestCase(TestBase): for v in [None, "ALTERNATE_LAYOUT"]: for r in range(5): - name = "test_r%d" % r - defines = ["REVISION=%d" % r] - if v: - name += "_" + v - defines += [v] - f = functools.partialmethod( - LibcxxStringDataFormatterSimulatorTestCase._run_test, defines - ) - setattr(LibcxxStringDataFormatterSimulatorTestCase, name, f) + for c in range(3): + name = "test_r%d_c%d" % (r, c) + defines = ["REVISION=%d" % r, "COMPRESSED_PAIR_REV=%d" % c] + if v: + name += "_" + v + defines += [v] + f = functools.partialmethod( + LibcxxStringDataFormatterSimulatorTestCase._run_test, defines + ) + setattr(LibcxxStringDataFormatterSimulatorTestCase, name, f) diff --git a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/string/main.cpp b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/string/main.cpp index b010dc2..f8fc13c 100644 --- a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/string/main.cpp +++ b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/string/main.cpp @@ -184,31 +184,50 @@ public: }; }; + __long &getLongRep() { +#if COMPRESSED_PAIR_REV == 0 + return __r_.first().__l; +#elif COMPRESSED_PAIR_REV <= 2 + return __rep_.__l; +#endif + } + + __short &getShortRep() { +#if COMPRESSED_PAIR_REV == 0 + return __r_.first().__s; +#elif COMPRESSED_PAIR_REV <= 2 + return __rep_.__s; +#endif + } + +#if COMPRESSED_PAIR_REV == 0 std::__lldb::__compressed_pair<__rep, allocator_type> __r_; +#elif COMPRESSED_PAIR_REV <= 2 + _LLDB_COMPRESSED_PAIR(__rep, __rep_, allocator_type, __alloc_); +#endif public: template <size_t __N> - basic_string(unsigned char __size, const value_type (&__data)[__N]) - : __r_({}, {}) { + basic_string(unsigned char __size, const value_type (&__data)[__N]) { static_assert(__N < __min_cap, ""); #ifdef BITMASKS - __r_.first().__s.__size_ = __size << __short_shift; + 
getShortRep().__size_ = __size << __short_shift; #else - __r_.first().__s.__size_ = __size; - __r_.first().__s.__is_long_ = false; + getShortRep().__size_ = __size; + getShortRep().__is_long_ = false; #endif for (size_t __i = 0; __i < __N; ++__i) - __r_.first().__s.__data_[__i] = __data[__i]; + getShortRep().__data_[__i] = __data[__i]; } - basic_string(size_t __cap, size_type __size, pointer __data) : __r_({}, {}) { + basic_string(size_t __cap, size_type __size, pointer __data) { #ifdef BITMASKS - __r_.first().__l.__cap_ = __cap | __long_mask; + getLongRep().__cap_ = __cap | __long_mask; #else - __r_.first().__l.__cap_ = __cap / __endian_factor; - __r_.first().__l.__is_long_ = true; + getLongRep().__cap_ = __cap / __endian_factor; + getLongRep().__is_long_ = true; #endif - __r_.first().__l.__size_ = __size; - __r_.first().__l.__data_ = __data; + getLongRep().__size_ = __size; + getLongRep().__data_ = __data; } }; diff --git a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/unique_ptr/TestDataFormatterLibcxxUniquePtrSimulator.py b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/unique_ptr/TestDataFormatterLibcxxUniquePtrSimulator.py index da780f5..0026eca 100644 --- a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/unique_ptr/TestDataFormatterLibcxxUniquePtrSimulator.py +++ b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/unique_ptr/TestDataFormatterLibcxxUniquePtrSimulator.py @@ -7,13 +7,15 @@ import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil +import functools class LibcxxUniquePtrDataFormatterSimulatorTestCase(TestBase): NO_DEBUG_INFO_TESTCASE = True - def test(self): - self.build() + def _run_test(self, defines): + cxxflags_extras = " ".join(["-D%s" % d for d in defines]) + self.build(dictionary=dict(CXXFLAGS_EXTRAS=cxxflags_extras)) lldbutil.run_to_source_breakpoint( self, "Break here", lldb.SBFileSpec("main.cpp") ) @@ -22,3 +24,12 @@ class LibcxxUniquePtrDataFormatterSimulatorTestCase(TestBase): self.expect( "frame variable var_with_deleter_up", substrs=["pointer =", "deleter ="] ) + + +for r in range(3): + name = "test_r%d" % r + defines = ["COMPRESSED_PAIR_REV=%d" % r] + f = functools.partialmethod( + LibcxxUniquePtrDataFormatterSimulatorTestCase._run_test, defines + ) + setattr(LibcxxUniquePtrDataFormatterSimulatorTestCase, name, f) diff --git a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/unique_ptr/main.cpp b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/unique_ptr/main.cpp index a6bfaa3..91a0195 100644 --- a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/unique_ptr/main.cpp +++ b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/unique_ptr/main.cpp @@ -16,9 +16,14 @@ public: typedef _Dp deleter_type; typedef _Tp *pointer; +#if COMPRESSED_PAIR_REV == 0 std::__lldb::__compressed_pair<pointer, deleter_type> __ptr_; explicit unique_ptr(pointer __p) noexcept : __ptr_(__p, std::__lldb::__value_init_tag()) {} +#elif COMPRESSED_PAIR_REV == 1 || COMPRESSED_PAIR_REV == 2 + _LLDB_COMPRESSED_PAIR(pointer, __ptr_, deleter_type, __deleter_); + explicit unique_ptr(pointer __p) noexcept : __ptr_(__p), __deleter_() {} +#endif }; } // namespace __lldb } // namespace std diff --git a/lldb/test/API/benchmarks/continue/Makefile 
b/lldb/test/API/lang/cpp/no_unique_address/Makefile index 99998b2..99998b2 100644 --- a/lldb/test/API/benchmarks/continue/Makefile +++ b/lldb/test/API/lang/cpp/no_unique_address/Makefile diff --git a/lldb/test/API/lang/cpp/no_unique_address/TestNoUniqueAddress.py b/lldb/test/API/lang/cpp/no_unique_address/TestNoUniqueAddress.py new file mode 100644 index 0000000..d16aaa1 --- /dev/null +++ b/lldb/test/API/lang/cpp/no_unique_address/TestNoUniqueAddress.py @@ -0,0 +1,67 @@ +""" +Test that LLDB correctly handles fields +marked with [[no_unique_address]]. +""" + +import lldb +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class NoUniqueAddressTestCase(TestBase): + def test(self): + self.build() + lldbutil.run_to_source_breakpoint( + self, "return 0", lldb.SBFileSpec("main.cpp", False) + ) + + # Qualified/unqualified lookup to templates in namespace + self.expect_expr( + "b1", + result_type="basic::Foo", + result_children=[ValueCheck(name="a", type="Empty")], + ) + + self.expect_expr( + "b2", + result_type="bases::Foo", + result_children=[ + ValueCheck( + type="bases::B", children=[ValueCheck(name="x", type="Empty")] + ), + ValueCheck( + type="bases::A", + children=[ + ValueCheck(name="c", type="long", value="1"), + ValueCheck(name="d", type="long", value="2"), + ], + ), + ValueCheck( + type="bases::C", children=[ValueCheck(name="x", type="Empty")] + ), + ], + ) + self.expect_expr( + "b3", + result_type="bases::Bar", + result_children=[ + ValueCheck( + type="bases::B", children=[ValueCheck(name="x", type="Empty")] + ), + ValueCheck( + type="bases::C", children=[ValueCheck(name="x", type="Empty")] + ), + ValueCheck( + type="bases::A", + children=[ + ValueCheck(name="c", type="long", value="5"), + ValueCheck(name="d", type="long", value="6"), + ], + ), + ], + ) + + self.expect("frame var b1") + self.expect("frame var b2") + self.expect("frame var b3") diff --git a/lldb/test/API/lang/cpp/no_unique_address/main.cpp b/lldb/test/API/lang/cpp/no_unique_address/main.cpp new file mode 100644 index 0000000..424fa90 --- /dev/null +++ b/lldb/test/API/lang/cpp/no_unique_address/main.cpp @@ -0,0 +1,35 @@ +struct Empty {}; + +namespace basic { +struct Foo { + [[no_unique_address]] Empty a; +}; +} // namespace basic + +namespace bases { +struct A { + long c, d; +}; + +struct B { + [[no_unique_address]] Empty x; +}; + +struct C { + [[no_unique_address]] Empty x; +}; + +struct Foo : B, A, C {}; +struct Bar : B, C, A {}; +} // namespace bases + +int main() { + basic::Foo b1; + bases::Foo b2; + bases::Bar b3; + b2.c = 1; + b2.d = 2; + b3.c = 5; + b3.d = 6; + return 0; +} diff --git a/lldb/tools/lldb-dap/DAP.cpp b/lldb/tools/lldb-dap/DAP.cpp index 6012ee5..fe1ca18 100644 --- a/lldb/tools/lldb-dap/DAP.cpp +++ b/lldb/tools/lldb-dap/DAP.cpp @@ -184,12 +184,11 @@ void DAP::SendJSON(const std::string &json_str) { // Serialize the JSON value into a string and send the JSON packet to // the "out" stream. 
void DAP::SendJSON(const llvm::json::Value &json) { - std::string s; - llvm::raw_string_ostream strm(s); + std::string json_str; + llvm::raw_string_ostream strm(json_str); strm << json; static std::mutex mutex; std::lock_guard<std::mutex> locker(mutex); - std::string json_str = strm.str(); SendJSON(json_str); if (log) { @@ -660,7 +659,6 @@ PacketStatus DAP::GetNextObject(llvm::json::Object &object) { std::string error_str; llvm::raw_string_ostream strm(error_str); strm << error; - strm.flush(); *log << "error: failed to parse JSON: " << error_str << std::endl << json << std::endl; } diff --git a/lldb/tools/lldb-dap/JSONUtils.cpp b/lldb/tools/lldb-dap/JSONUtils.cpp index 342859a..f175079 100644 --- a/lldb/tools/lldb-dap/JSONUtils.cpp +++ b/lldb/tools/lldb-dap/JSONUtils.cpp @@ -1480,7 +1480,6 @@ std::string JSONToString(const llvm::json::Value &json) { std::string data; llvm::raw_string_ostream os(data); os << json; - os.flush(); return data; } diff --git a/lldb/tools/lldb-dap/LLDBUtils.cpp b/lldb/tools/lldb-dap/LLDBUtils.cpp index 2da1078..ff6bea1 100644 --- a/lldb/tools/lldb-dap/LLDBUtils.cpp +++ b/lldb/tools/lldb-dap/LLDBUtils.cpp @@ -84,7 +84,6 @@ std::string RunLLDBCommands(llvm::StringRef prefix, llvm::raw_string_ostream strm(s); required_command_failed = !RunLLDBCommands(prefix, commands, strm, parse_command_directives); - strm.flush(); return s; } diff --git a/lldb/tools/lldb-dap/lldb-dap.cpp b/lldb/tools/lldb-dap/lldb-dap.cpp index 51765cd..c2ebc9a 100644 --- a/lldb/tools/lldb-dap/lldb-dap.cpp +++ b/lldb/tools/lldb-dap/lldb-dap.cpp @@ -635,7 +635,6 @@ void SetSourceMapFromArguments(const llvm::json::Object &arguments) { // Do any source remapping needed before we create our targets strm << "\".\" \"" << sourcePath << "\""; } - strm.flush(); if (!sourceMapCommand.empty()) { g_dap.RunLLDBCommands("Setting source map:", {sourceMapCommand}); } @@ -4127,7 +4126,6 @@ void request_disassemble(const llvm::json::Object &request) { sb << llvm::format("%2.2x ", b); } } - sb.flush(); llvm::json::Object disassembled_inst{ {"address", "0x" + llvm::utohexstr(inst_addr)}, @@ -4158,7 +4156,6 @@ void request_disassemble(const llvm::json::Object &request) { if (c && c[0]) { si << " ; " << c; } - si.flush(); disassembled_inst.try_emplace("instruction", instruction); diff --git a/lldb/tools/lldb-instr/Instrument.cpp b/lldb/tools/lldb-instr/Instrument.cpp index d07ccf1..ba6f9a7 100644 --- a/lldb/tools/lldb-instr/Instrument.cpp +++ b/lldb/tools/lldb-instr/Instrument.cpp @@ -59,13 +59,13 @@ public: if (C->getBeginLoc().isMacroID()) { CharSourceRange Range = MyRewriter.getSourceMgr().getExpansionRange(C->getSourceRange()); - MyRewriter.ReplaceText(Range, Macro.str()); + MyRewriter.ReplaceText(Range, Buffer); } else { Macro << ";"; SourceLocation InsertLoc = Lexer::getLocForEndOfToken( Body->getBeginLoc(), 0, MyRewriter.getSourceMgr(), MyRewriter.getLangOpts()); - MyRewriter.InsertTextAfter(InsertLoc, Macro.str()); + MyRewriter.InsertTextAfter(InsertLoc, Buffer); } break; } diff --git a/lldb/tools/lldb-server/LLDBServerUtilities.cpp b/lldb/tools/lldb-server/LLDBServerUtilities.cpp index 5facfbf..0891f0e 100644 --- a/lldb/tools/lldb-server/LLDBServerUtilities.cpp +++ b/lldb/tools/lldb-server/LLDBServerUtilities.cpp @@ -73,7 +73,7 @@ bool LLDBServerUtilities::SetupLogging(const std::string &log_file, channel_then_categories.GetArgumentArrayRef(), error_stream); if (!success) { errs() << formatv("Unable to setup logging for channel \"{0}\": {1}", - channel, error_stream.str()); + channel, error); return false; 
} } diff --git a/lldb/tools/lldb-test/lldb-test.cpp b/lldb/tools/lldb-test/lldb-test.cpp index 535422a..ce21e3d 100644 --- a/lldb/tools/lldb-test/lldb-test.cpp +++ b/lldb/tools/lldb-test/lldb-test.cpp @@ -414,7 +414,7 @@ std::string opts::breakpoint::substitute(StringRef Cmd) { break; } } - return std::move(OS.str()); + return Result; } int opts::breakpoint::evaluateBreakpoints(Debugger &Dbg) { diff --git a/lldb/unittests/Symbol/PostfixExpressionTest.cpp b/lldb/unittests/Symbol/PostfixExpressionTest.cpp index 1f9b2af4..d56df47 100644 --- a/lldb/unittests/Symbol/PostfixExpressionTest.cpp +++ b/lldb/unittests/Symbol/PostfixExpressionTest.cpp @@ -161,7 +161,7 @@ static std::string ParseAndGenerateDWARF(llvm::StringRef expr) { llvm::raw_string_ostream os(result); llvm::DWARFExpression(extractor, addr_size, llvm::dwarf::DWARF32) .print(os, llvm::DIDumpOptions(), nullptr); - return std::move(os.str()); + return result; } TEST(PostfixExpression, ToDWARF) { diff --git a/lldb/unittests/SymbolFile/NativePDB/PdbFPOProgramToDWARFExpressionTests.cpp b/lldb/unittests/SymbolFile/NativePDB/PdbFPOProgramToDWARFExpressionTests.cpp index f074927..efb8f720 100644 --- a/lldb/unittests/SymbolFile/NativePDB/PdbFPOProgramToDWARFExpressionTests.cpp +++ b/lldb/unittests/SymbolFile/NativePDB/PdbFPOProgramToDWARFExpressionTests.cpp @@ -43,7 +43,7 @@ CheckValidProgramTranslation(llvm::StringRef fpo_program, .print(os, llvm::DIDumpOptions(), nullptr); // actual check - ASSERT_EQ(expected_dwarf_expression, os.str()); + ASSERT_EQ(expected_dwarf_expression, result); } TEST(PDBFPOProgramToDWARFExpressionTests, SingleAssignmentRegisterRef) { diff --git a/llvm/docs/GlobalISel/GenericOpcode.rst b/llvm/docs/GlobalISel/GenericOpcode.rst index bba56d9..b06ce14 100644 --- a/llvm/docs/GlobalISel/GenericOpcode.rst +++ b/llvm/docs/GlobalISel/GenericOpcode.rst @@ -504,6 +504,11 @@ G_FPTOSI, G_FPTOUI, G_SITOFP, G_UITOFP Convert between integer and floating point. +G_FPTOSI_SAT, G_FPTOUI_SAT +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Saturating convert between integer and floating point. + G_FABS ^^^^^^ diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst index 11e400c..d51b1c3 100644 --- a/llvm/docs/RISCVUsage.rst +++ b/llvm/docs/RISCVUsage.rst @@ -208,7 +208,7 @@ on support follow. ``Zmmul`` Supported ``Ztso`` Supported ``Zvbb`` Supported - ``Zvbc`` Supported (`See note <#iscv-vector-crypto-note>`__) + ``Zvbc`` Supported (`See note <#riscv-vector-crypto-note>`__) ``Zve32x`` (`Partially <#riscv-vlen-32-note>`__) Supported ``Zve32f`` (`Partially <#riscv-vlen-32-note>`__) Supported ``Zve64x`` Supported @@ -219,18 +219,18 @@ on support follow. 
``Zvfh`` Supported ``Zvfhmin`` Supported ``Zvkb`` Supported - ``Zvkg`` Supported (`See note <#iscv-vector-crypto-note>`__) - ``Zvkn`` Supported (`See note <#iscv-vector-crypto-note>`__) - ``Zvknc`` Supported (`See note <#iscv-vector-crypto-note>`__) - ``Zvkned`` Supported (`See note <#iscv-vector-crypto-note>`__) - ``Zvkng`` Supported (`See note <#iscv-vector-crypto-note>`__) - ``Zvknha`` Supported (`See note <#iscv-vector-crypto-note>`__) - ``Zvknhb`` Supported (`See note <#iscv-vector-crypto-note>`__) - ``Zvks`` Supported (`See note <#iscv-vector-crypto-note>`__) - ``Zvksc`` Supported (`See note <#iscv-vector-crypto-note>`__) - ``Zvksed`` Supported (`See note <#iscv-vector-crypto-note>`__) - ``Zvksg`` Supported (`See note <#iscv-vector-crypto-note>`__) - ``Zvksh`` Supported (`See note <#iscv-vector-crypto-note>`__) + ``Zvkg`` Supported (`See note <#riscv-vector-crypto-note>`__) + ``Zvkn`` Supported (`See note <#riscv-vector-crypto-note>`__) + ``Zvknc`` Supported (`See note <#riscv-vector-crypto-note>`__) + ``Zvkned`` Supported (`See note <#riscv-vector-crypto-note>`__) + ``Zvkng`` Supported (`See note <#riscv-vector-crypto-note>`__) + ``Zvknha`` Supported (`See note <#riscv-vector-crypto-note>`__) + ``Zvknhb`` Supported (`See note <#riscv-vector-crypto-note>`__) + ``Zvks`` Supported (`See note <#riscv-vector-crypto-note>`__) + ``Zvksc`` Supported (`See note <#riscv-vector-crypto-note>`__) + ``Zvksed`` Supported (`See note <#riscv-vector-crypto-note>`__) + ``Zvksg`` Supported (`See note <#riscv-vector-crypto-note>`__) + ``Zvksh`` Supported (`See note <#riscv-vector-crypto-note>`__) ``Zvkt`` Supported ``Zvl32b`` (`Partially <#riscv-vlen-32-note>`__) Supported ``Zvl64b`` Supported @@ -437,3 +437,29 @@ line. This currently applies to the following extensions: * ``Zvksg`` * ``Zvksh`` * ``Zvkt`` + +Global Pointer (GP) Relaxation and the Small Data Limit +======================================================= + +Some of the RISC-V psABI variants reserve ``gp`` (``x3``) for use as a "Global Pointer", to make generating data addresses more efficient. + +To use this functionality, you need to do all of the following: +* Use the ``medlow`` (aka ``small``) code model; +* Not use the ``gp`` register for any other purpose (some platforms use it for the shadow stack and others as a temporary -- as denoted by the ``Tag_RISCV_x3_reg_usage`` build attribute); +* Compile your objects with Clang's ``-mrelax`` option, to enable relaxation annotations on relocatable objects; +* Compile for a position-dependent static executable (not a shared library, and ``-fno-PIC`` / ``-fno-pic`` / ``-fno-pie``); and +* Use LLD's ``--relax-gp`` option. + +LLD will relax (rewrite) any code sequences that materialize an address within 2048 bytes of ``__global_pointer$`` (which will be defined if it is used and does not already exist) to instead generate the address using ``gp`` and the correct (signed) 12-bit immediate. This usually saves at least one instruction compared to materializing a full 32-bit address value. + +There can only be one ``gp`` value in a process (as ``gp`` is not changed when calling into a function in a shared library), so the symbol is only defined and this relaxation is only done for executables, and not for shared libraries. The linker expects executable startup code to put the value of ``__global_pointer$`` (from the executable) into ``gp`` before any user code is run.
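A rough end-to-end sketch of the workflow above (the file name, symbols, and the 8-byte limit are invented for illustration; exact flags and runtime setup vary by toolchain):

// gp_demo.cpp -- hypothetical example combining -msmall-data-limit with
// LLD's --relax-gp. Globals at or below the limit are emitted into
// .sdata/.sbss, which are laid out near __global_pointer$, so LLD can
// relax their address materialization to a single gp-relative access.
int small_counter;       // 4 bytes, under an 8-byte limit -> .sbss
char big_buffer[4096];   // over the limit -> .bss
int bump() { return ++small_counter; }
int main() { return bump(); }
// Possible build of a static, position-dependent executable:
//   clang++ --target=riscv64-unknown-linux-gnu -O2 -fno-pic -mrelax \
//           -msmall-data-limit=8 -c gp_demo.cpp
//   clang++ --target=riscv64-unknown-linux-gnu -static -fuse-ld=lld \
//           -Wl,--relax-gp gp_demo.o -o gp_demo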
+ + Arguably, the most efficient use for this addressing mode is for smaller global variables, as larger global variables likely need many more loads or stores when they are being accessed anyway, so the cost of materializing the upper bits can be shared. + + Therefore, the compiler can place smaller global variables into sections with names starting with ``.sdata`` or ``.sbss`` (matching sections with names starting with ``.data`` and ``.bss`` respectively). LLD knows to define the ``__global_pointer$`` symbol close to these sections, and to lay these sections out adjacent to the ``.data`` section. + + Clang's ``-msmall-data-limit=`` option controls the threshold size (in bytes) for a global variable to be considered small. ``-msmall-data-limit=0`` disables the use of sections starting with ``.sdata`` and ``.sbss``. The ``-msmall-data-limit=`` option will not move global variables that have an explicit data section, and will keep globals in separate sections if you are using ``-fdata-sections``. + + The small data limit threshold is also used to separate small constants into sections with names starting with ``.srodata``. LLD does not place these with the ``.sdata`` and ``.sbss`` sections as ``.srodata`` sections are read-only and the other two are writable. Instead, the ``.srodata`` sections are placed adjacent to ``.rodata``. + + Data suggests that these options can produce significant improvements across a range of benchmarks. diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst index 5245689..6df4c37 100644 --- a/llvm/docs/ReleaseNotes.rst +++ b/llvm/docs/ReleaseNotes.rst @@ -123,6 +123,7 @@ Changes to the RISC-V Backend largely untested. * The ``Zvbc32e`` and ``Zvkgs`` extensions are now supported experimentally. * Added ``Smctr`` and ``Ssctr`` extensions. +* ``-mcpu=syntacore-scr7`` was added. Changes to the WebAssembly Backend ---------------------------------- diff --git a/llvm/include/llvm/ADT/DenseMap.h b/llvm/include/llvm/ADT/DenseMap.h index 68498a3..f0f992f 100644 --- a/llvm/include/llvm/ADT/DenseMap.h +++ b/llvm/include/llvm/ADT/DenseMap.h @@ -318,22 +318,6 @@ public: return Ret; } - /// Returns the value associated to the key in the map if it exists. If it - /// does not exist, emplace a default value for the key and returns a - /// reference to the newly created value.
- LLVM_DEPRECATED("Use operator[] instead", "[Key]") - ValueT &getOrInsertDefault(const KeyT &Key) { - return try_emplace(Key).first->second; - } - bool erase(const KeyT &Val) { BucketT *TheBucket = doFind(Val); if (!TheBucket) @@ -353,15 +337,6 @@ public: incrementNumTombstones(); } - LLVM_DEPRECATED("Use [Key] instead", "[Key]") - value_type &FindAndConstruct(const KeyT &Key) { - BucketT *TheBucket; - if (LookupBucketFor(Key, TheBucket)) - return *TheBucket; - - return *InsertIntoBucket(TheBucket, Key); - } - ValueT &operator[](const KeyT &Key) { BucketT *TheBucket; if (LookupBucketFor(Key, TheBucket)) @@ -370,15 +345,6 @@ public: return InsertIntoBucket(TheBucket, Key)->second; } - LLVM_DEPRECATED("Use [Key] instead", "[Key]") - value_type &FindAndConstruct(KeyT &&Key) { - BucketT *TheBucket; - if (LookupBucketFor(Key, TheBucket)) - return *TheBucket; - - return *InsertIntoBucket(TheBucket, std::move(Key)); - } - ValueT &operator[](KeyT &&Key) { BucketT *TheBucket; if (LookupBucketFor(Key, TheBucket)) diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h index ef1171d..b7c545e 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h @@ -823,6 +823,8 @@ public: case TargetOpcode::G_FPEXT: case TargetOpcode::G_FPTOSI: case TargetOpcode::G_FPTOUI: + case TargetOpcode::G_FPTOSI_SAT: + case TargetOpcode::G_FPTOUI_SAT: case TargetOpcode::G_FPTRUNC: case TargetOpcode::G_INTTOPTR: case TargetOpcode::G_PTRTOINT: diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h index afd6825..5360850 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h @@ -398,6 +398,7 @@ public: LegalizeResult lowerSITOFP(MachineInstr &MI); LegalizeResult lowerFPTOUI(MachineInstr &MI); LegalizeResult lowerFPTOSI(MachineInstr &MI); + LegalizeResult lowerFPTOINT_SAT(MachineInstr &MI); LegalizeResult lowerFPTRUNC_F64_TO_F16(MachineInstr &MI); LegalizeResult lowerFPTRUNC(MachineInstr &MI); diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h index c0b9d0e..9b99348 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h @@ -2035,6 +2035,16 @@ public: return buildInstr(TargetOpcode::G_FPTOSI, {Dst}, {Src0}); } + /// Build and insert \p Res = G_FPTOUI_SAT \p Src0 + MachineInstrBuilder buildFPTOUI_SAT(const DstOp &Dst, const SrcOp &Src0) { + return buildInstr(TargetOpcode::G_FPTOUI_SAT, {Dst}, {Src0}); + } + + /// Build and insert \p Res = G_FPTOSI_SAT \p Src0 + MachineInstrBuilder buildFPTOSI_SAT(const DstOp &Dst, const SrcOp &Src0) { + return buildInstr(TargetOpcode::G_FPTOSI_SAT, {Dst}, {Src0}); + } + /// Build and insert \p Dst = G_INTRINSIC_ROUNDEVEN \p Src0, \p Src1 MachineInstrBuilder buildIntrinsicRoundeven(const DstOp &Dst, const SrcOp &Src0, diff --git a/llvm/include/llvm/CodeGen/Register.h b/llvm/include/llvm/CodeGen/Register.h index fb4e551..c93b8e1 100644 --- a/llvm/include/llvm/CodeGen/Register.h +++ b/llvm/include/llvm/CodeGen/Register.h @@ -51,7 +51,7 @@ public: /// Compute the frame index from a register value representing a stack slot. 
static int stackSlot2Index(Register Reg) { assert(Reg.isStack() && "Not a stack slot"); - return int(Reg - MCRegister::FirstStackSlot); + return int(Reg.id() - MCRegister::FirstStackSlot); } /// Convert a non-negative frame index to a stack slot register value. @@ -76,7 +76,7 @@ public: /// The first virtual register in a function will get the index 0. static unsigned virtReg2Index(Register Reg) { assert(Reg.isVirtual() && "Not a virtual register"); - return Reg & ~MCRegister::VirtualRegFlag; + return Reg.id() & ~MCRegister::VirtualRegFlag; } /// Convert a 0-based index to a virtual register number. @@ -132,7 +132,6 @@ public: /// Comparisons against register constants. E.g. /// * R == AArch64::WZR /// * R == 0 - /// * R == VirtRegMap::NO_PHYS_REG constexpr bool operator==(unsigned Other) const { return Reg == Other; } constexpr bool operator!=(unsigned Other) const { return Reg != Other; } constexpr bool operator==(int Other) const { return Reg == unsigned(Other); } diff --git a/llvm/include/llvm/CodeGen/SDPatternMatch.h b/llvm/include/llvm/CodeGen/SDPatternMatch.h index 92efff9..04135ee 100644 --- a/llvm/include/llvm/CodeGen/SDPatternMatch.h +++ b/llvm/include/llvm/CodeGen/SDPatternMatch.h @@ -793,6 +793,10 @@ template <typename Opnd> inline UnaryOpc_match<Opnd> m_FPToSI(const Opnd &Op) { return UnaryOpc_match<Opnd>(ISD::FP_TO_SINT, Op); } +template <typename Opnd> inline UnaryOpc_match<Opnd> m_Ctlz(const Opnd &Op) { + return UnaryOpc_match<Opnd>(ISD::CTLZ, Op); +} + // === Constants === struct ConstantInt_match { APInt *BindVal; diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h index 65c5788..a3bfc63 100644 --- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h +++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h @@ -2287,29 +2287,30 @@ private: /// Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair. template <> struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> { - using RegInfo = DenseMapInfo<unsigned>; + using RegInfo = DenseMapInfo<Register>; + using SubRegInfo = DenseMapInfo<unsigned>; static inline TargetInstrInfo::RegSubRegPair getEmptyKey() { return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(), - RegInfo::getEmptyKey()); + SubRegInfo::getEmptyKey()); } static inline TargetInstrInfo::RegSubRegPair getTombstoneKey() { return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(), - RegInfo::getTombstoneKey()); + SubRegInfo::getTombstoneKey()); } /// Reuse getHashValue implementation from /// std::pair<unsigned, unsigned>. 
static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) { - std::pair<unsigned, unsigned> PairVal = std::make_pair(Val.Reg, Val.SubReg); - return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(PairVal); + return DenseMapInfo<std::pair<Register, unsigned>>::getHashValue( + std::make_pair(Val.Reg, Val.SubReg)); } static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS, const TargetInstrInfo::RegSubRegPair &RHS) { return RegInfo::isEqual(LHS.Reg, RHS.Reg) && - RegInfo::isEqual(LHS.SubReg, RHS.SubReg); + SubRegInfo::isEqual(LHS.SubReg, RHS.SubReg); } }; diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h index ebf06bc..1a2f31e 100644 --- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h +++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h @@ -1203,19 +1203,6 @@ public: virtual bool isNonallocatableRegisterCalleeSave(MCRegister Reg) const { return false; } - - /// Returns if the architecture being targeted has the required Pseudo - /// Instructions for initializing the register. By default this returns false, - /// but where it is overriden for an architecture, the behaviour will be - /// different. This can either be a check to ensure the Register Class is - /// present, or to return true as an indication the architecture supports the - /// pass. If using the method that does not check for the Register Class, it - /// is imperative to ensure all required Pseudo Instructions are implemented, - /// otherwise compilation may fail with an `Unexpected register class` error. - virtual bool - doesRegClassHavePseudoInitUndef(const TargetRegisterClass *RC) const { - return false; - } }; //===----------------------------------------------------------------------===// diff --git a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h index b4b018f..bfaa645 100644 --- a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h +++ b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h @@ -333,13 +333,13 @@ public: /// Get the list of MacroFusion predicates. virtual std::vector<MacroFusionPredTy> getMacroFusions() const { return {}; }; - /// supportsInitUndef is used to determine if an architecture supports - /// the Init Undef Pass. By default, it is assumed that it will not support - /// the pass, with architecture specific overrides providing the information - /// where they are implemented. - virtual bool supportsInitUndef() const { return false; } + /// Whether the target has instructions where an early-clobber result + /// operand cannot overlap with an undef input operand. + virtual bool requiresDisjointEarlyClobberAndUndef() const { + // Conservatively assume such instructions exist by default. + return true; + } }; - } // end namespace llvm #endif // LLVM_CODEGEN_TARGETSUBTARGETINFO_H diff --git a/llvm/include/llvm/CodeGen/VirtRegMap.h b/llvm/include/llvm/CodeGen/VirtRegMap.h index 864eb23..96c28c1 100644 --- a/llvm/include/llvm/CodeGen/VirtRegMap.h +++ b/llvm/include/llvm/CodeGen/VirtRegMap.h @@ -33,7 +33,6 @@ class TargetInstrInfo; class VirtRegMap : public MachineFunctionPass { public: enum { - NO_PHYS_REG = 0, NO_STACK_SLOT = (1L << 30)-1, MAX_STACK_SLOT = (1L << 18)-1 }; @@ -49,7 +48,7 @@ class TargetInstrInfo; /// it; even spilled virtual registers (the register mapped to a /// spilled register is the temporary used to load it from the /// stack). 
- IndexedMap<Register, VirtReg2IndexFunctor> Virt2PhysMap; + IndexedMap<MCRegister, VirtReg2IndexFunctor> Virt2PhysMap; /// Virt2StackSlotMap - This is virtual register to stack slot /// mapping. Each spilled virtual register has an entry in it @@ -59,11 +58,11 @@ class TargetInstrInfo; /// Virt2SplitMap - This is virtual register to splitted virtual register /// mapping. - IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2SplitMap; + IndexedMap<Register, VirtReg2IndexFunctor> Virt2SplitMap; /// Virt2ShapeMap - For X86 AMX register whose register is bound shape /// information. - DenseMap<unsigned, ShapeT> Virt2ShapeMap; + DenseMap<Register, ShapeT> Virt2ShapeMap; /// createSpillSlot - Allocate a spill slot for RC from MFI. unsigned createSpillSlot(const TargetRegisterClass *RC); @@ -71,9 +70,7 @@ class TargetInstrInfo; public: static char ID; - VirtRegMap() - : MachineFunctionPass(ID), Virt2PhysMap(NO_PHYS_REG), - Virt2StackSlotMap(NO_STACK_SLOT), Virt2SplitMap(0) {} + VirtRegMap() : MachineFunctionPass(ID), Virt2StackSlotMap(NO_STACK_SLOT) {} VirtRegMap(const VirtRegMap &) = delete; VirtRegMap &operator=(const VirtRegMap &) = delete; @@ -96,15 +93,13 @@ class TargetInstrInfo; /// returns true if the specified virtual register is /// mapped to a physical register - bool hasPhys(Register virtReg) const { - return getPhys(virtReg) != NO_PHYS_REG; - } + bool hasPhys(Register virtReg) const { return getPhys(virtReg).isValid(); } /// returns the physical register mapped to the specified /// virtual register MCRegister getPhys(Register virtReg) const { assert(virtReg.isVirtual()); - return MCRegister::from(Virt2PhysMap[virtReg.id()]); + return Virt2PhysMap[virtReg]; } /// creates a mapping for the specified virtual register to @@ -123,16 +118,16 @@ class TargetInstrInfo; } void assignVirt2Shape(Register virtReg, ShapeT shape) { - Virt2ShapeMap[virtReg.id()] = shape; + Virt2ShapeMap[virtReg] = shape; } /// clears the specified virtual register's, physical /// register mapping void clearVirt(Register virtReg) { assert(virtReg.isVirtual()); - assert(Virt2PhysMap[virtReg.id()] != NO_PHYS_REG && + assert(Virt2PhysMap[virtReg] && "attempt to clear a not assigned virtual register"); - Virt2PhysMap[virtReg.id()] = NO_PHYS_REG; + Virt2PhysMap[virtReg] = MCRegister(); } /// clears all virtual to physical register mappings @@ -151,15 +146,15 @@ class TargetInstrInfo; /// records virtReg is a split live interval from SReg. void setIsSplitFromReg(Register virtReg, Register SReg) { - Virt2SplitMap[virtReg.id()] = SReg; + Virt2SplitMap[virtReg] = SReg; if (hasShape(SReg)) { - Virt2ShapeMap[virtReg.id()] = getShape(SReg); + Virt2ShapeMap[virtReg] = getShape(SReg); } } /// returns the live interval virtReg is split from. Register getPreSplitReg(Register virtReg) const { - return Virt2SplitMap[virtReg.id()]; + return Virt2SplitMap[virtReg]; } /// getOriginal - Return the original virtual register that VirtReg descends @@ -178,15 +173,14 @@ class TargetInstrInfo; return true; // Split register can be assigned a physical register as well as a // stack slot or remat id. 
- return (Virt2SplitMap[virtReg.id()] && - Virt2PhysMap[virtReg.id()] != NO_PHYS_REG); + return (Virt2SplitMap[virtReg] && Virt2PhysMap[virtReg]); } /// returns the stack slot mapped to the specified virtual /// register int getStackSlot(Register virtReg) const { assert(virtReg.isVirtual()); - return Virt2StackSlotMap[virtReg.id()]; + return Virt2StackSlotMap[virtReg]; } /// create a mapping for the specifed virtual register to diff --git a/llvm/include/llvm/Frontend/OpenMP/OMP.td b/llvm/include/llvm/Frontend/OpenMP/OMP.td index 2cbcbd8..1f747df 100644 --- a/llvm/include/llvm/Frontend/OpenMP/OMP.td +++ b/llvm/include/llvm/Frontend/OpenMP/OMP.td @@ -1192,7 +1192,6 @@ def OMP_DistributeParallelDo : Directive<"distribute parallel do"> { VersionedClause<OMPC_If>, VersionedClause<OMPC_NumThreads>, VersionedClause<OMPC_Order, 50>, - VersionedClause<OMPC_Ordered>, VersionedClause<OMPC_ProcBind>, VersionedClause<OMPC_Schedule>, ]; @@ -1293,7 +1292,6 @@ def OMP_DistributeSimd : Directive<"distribute simd"> { VersionedClause<OMPC_If, 50>, VersionedClause<OMPC_NumThreads>, VersionedClause<OMPC_Order, 50>, - VersionedClause<OMPC_Ordered>, VersionedClause<OMPC_ProcBind>, VersionedClause<OMPC_SafeLen>, VersionedClause<OMPC_Schedule>, @@ -1840,7 +1838,6 @@ def OMP_TargetParallel : Directive<"target parallel"> { def OMP_TargetParallelDo : Directive<"target parallel do"> { let allowedClauses = [ VersionedClause<OMPC_Allocator>, - VersionedClause<OMPC_Copyin>, VersionedClause<OMPC_Default>, VersionedClause<OMPC_Depend>, VersionedClause<OMPC_FirstPrivate>, @@ -1977,7 +1974,6 @@ def OMP_TargetParallelForSimd : Directive<"target parallel for simd"> { def OMP_target_parallel_loop : Directive<"target parallel loop"> { let allowedClauses = [ VersionedClause<OMPC_Allocate>, - VersionedClause<OMPC_Copyin>, VersionedClause<OMPC_Depend>, VersionedClause<OMPC_Device>, VersionedClause<OMPC_FirstPrivate>, @@ -2106,7 +2102,6 @@ def OMP_TargetTeamsDistributeParallelDo : Directive<"target teams distribute parallel do"> { let allowedClauses = [ VersionedClause<OMPC_Allocate>, - VersionedClause<OMPC_Copyin>, VersionedClause<OMPC_Depend>, VersionedClause<OMPC_FirstPrivate>, VersionedClause<OMPC_HasDeviceAddr, 51>, @@ -2115,7 +2110,6 @@ def OMP_TargetTeamsDistributeParallelDo : VersionedClause<OMPC_LastPrivate>, VersionedClause<OMPC_Linear>, VersionedClause<OMPC_Map>, - VersionedClause<OMPC_Ordered>, VersionedClause<OMPC_Private>, VersionedClause<OMPC_Reduction>, VersionedClause<OMPC_Shared>, @@ -2143,7 +2137,6 @@ def OMP_TargetTeamsDistributeParallelDoSimd : let allowedClauses = [ VersionedClause<OMPC_Aligned>, VersionedClause<OMPC_Allocate>, - VersionedClause<OMPC_Copyin>, VersionedClause<OMPC_Depend>, VersionedClause<OMPC_FirstPrivate>, VersionedClause<OMPC_HasDeviceAddr, 51>, @@ -2153,7 +2146,6 @@ def OMP_TargetTeamsDistributeParallelDoSimd : VersionedClause<OMPC_Linear>, VersionedClause<OMPC_Map>, VersionedClause<OMPC_NonTemporal>, - VersionedClause<OMPC_Ordered>, VersionedClause<OMPC_Private>, VersionedClause<OMPC_Reduction>, VersionedClause<OMPC_Shared>, @@ -2395,7 +2387,6 @@ def OMP_TeamsDistributeParallelDo : VersionedClause<OMPC_NumTeams>, VersionedClause<OMPC_NumThreads>, VersionedClause<OMPC_Order, 50>, - VersionedClause<OMPC_Ordered>, VersionedClause<OMPC_ProcBind>, VersionedClause<OMPC_Schedule>, VersionedClause<OMPC_ThreadLimit>, diff --git a/llvm/include/llvm/IR/Function.h b/llvm/include/llvm/IR/Function.h index 866c68d..a4d5528 100644 --- a/llvm/include/llvm/IR/Function.h +++ b/llvm/include/llvm/IR/Function.h 
@@ -445,6 +445,9 @@ public: /// gets the attribute from the list of attributes. Attribute getAttributeAtIndex(unsigned i, StringRef Kind) const; + /// Check if attribute of the given kind is set at the given index. + bool hasAttributeAtIndex(unsigned Idx, Attribute::AttrKind Kind) const; + /// Return the attribute for the given attribute kind. Attribute getFnAttribute(Attribute::AttrKind Kind) const; diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def index e81752d..55f4719 100644 --- a/llvm/include/llvm/IR/VPIntrinsics.def +++ b/llvm/include/llvm/IR/VPIntrinsics.def @@ -241,6 +241,7 @@ VP_PROPERTY_FUNCTIONAL_INTRINSIC(ctlz) VP_PROPERTY_FUNCTIONAL_SDOPC(CTLZ) END_REGISTER_VP_SDNODE(VP_CTLZ) BEGIN_REGISTER_VP_SDNODE(VP_CTLZ_ZERO_UNDEF, -1, vp_ctlz_zero_undef, 1, 2) +VP_PROPERTY_FUNCTIONAL_SDOPC(CTLZ_ZERO_UNDEF) END_REGISTER_VP_SDNODE(VP_CTLZ_ZERO_UNDEF) END_REGISTER_VP_INTRINSIC(vp_ctlz) diff --git a/llvm/include/llvm/MC/MCRegister.h b/llvm/include/llvm/MC/MCRegister.h index 530c187..dd8bc1e 100644 --- a/llvm/include/llvm/MC/MCRegister.h +++ b/llvm/include/llvm/MC/MCRegister.h @@ -91,7 +91,6 @@ public: /// Comparisons against register constants. E.g. /// * R == AArch64::WZR /// * R == 0 - /// * R == VirtRegMap::NO_PHYS_REG constexpr bool operator==(unsigned Other) const { return Reg == Other; } constexpr bool operator!=(unsigned Other) const { return Reg != Other; } constexpr bool operator==(int Other) const { return Reg == unsigned(Other); } diff --git a/llvm/include/llvm/MC/MCStreamer.h b/llvm/include/llvm/MC/MCStreamer.h index 78aa120..42b1114 100644 --- a/llvm/include/llvm/MC/MCStreamer.h +++ b/llvm/include/llvm/MC/MCStreamer.h @@ -148,7 +148,7 @@ public: int64_t Offset = 0); virtual void emitMovSP(unsigned Reg, int64_t Offset = 0); virtual void emitPad(int64_t Offset); - virtual void emitRegSave(const SmallVectorImpl<unsigned> &RegList, + virtual void emitRegSave(const SmallVectorImpl<MCRegister> &RegList, bool isVector); virtual void emitUnwindRaw(int64_t StackOffset, const SmallVectorImpl<uint8_t> &Opcodes); diff --git a/llvm/include/llvm/MCA/HardwareUnits/ResourceManager.h b/llvm/include/llvm/MCA/HardwareUnits/ResourceManager.h index 0e3f16d..f71928f 100644 --- a/llvm/include/llvm/MCA/HardwareUnits/ResourceManager.h +++ b/llvm/include/llvm/MCA/HardwareUnits/ResourceManager.h @@ -232,6 +232,8 @@ public: /// `NumUnits` available units. bool isReady(unsigned NumUnits = 1) const; + uint64_t getNumReadyUnits() const { return llvm::popcount(ReadyMask); } + bool isAResourceGroup() const { return IsAGroup; } bool containsResource(uint64_t ID) const { return ResourceMask & ID; } @@ -428,9 +430,32 @@ public: uint64_t getProcResUnitMask() const { return ProcResUnitMask; } uint64_t getAvailableProcResUnits() const { return AvailableProcResUnits; } - void issueInstruction( - const InstrDesc &Desc, - SmallVectorImpl<std::pair<ResourceRef, ReleaseAtCycles>> &Pipes); + using ResourceWithCycles = std::pair<ResourceRef, ReleaseAtCycles>; + + void issueInstruction(const InstrDesc &Desc, + SmallVectorImpl<ResourceWithCycles> &Pipes) { + if (Desc.HasPartiallyOverlappingGroups) + return issueInstructionImpl(Desc, Pipes); + + return fastIssueInstruction(Desc, Pipes); + } + + // Selects pipeline resources consumed by an instruction. + // This method works under the assumption that used group resources don't + // partially overlap. The logic is guaranteed to find a valid resource unit + // schedule, no matter in which order individual uses are processed. 
For that + // reason, the vector of resource uses is simply (and quickly) processed in + // sequence. The resulting schedule is eventually stored into vector `Pipes`. + void fastIssueInstruction(const InstrDesc &Desc, + SmallVectorImpl<ResourceWithCycles> &Pipes); + + // Selects pipeline resources consumed by an instruction. + // This method works under the assumption that used resource groups may + // partially overlap. This complicates the selection process, because the + // order in which uses are processed matters. The logic internally prioritizes + // groups which are more constrained than others. + void issueInstructionImpl(const InstrDesc &Desc, + SmallVectorImpl<ResourceWithCycles> &Pipes); void cycleEvent(SmallVectorImpl<ResourceRef> &ResourcesFreed); diff --git a/llvm/include/llvm/Passes/PassBuilder.h b/llvm/include/llvm/Passes/PassBuilder.h index e1d78a8..e6ced0c 100644 --- a/llvm/include/llvm/Passes/PassBuilder.h +++ b/llvm/include/llvm/Passes/PassBuilder.h @@ -25,7 +25,6 @@ #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/IPO/Inliner.h" #include "llvm/Transforms/IPO/ModuleInliner.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Scalar/LoopPassManager.h" #include <optional> #include <vector> diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def index a3692a5..9e70eb8 100644 --- a/llvm/include/llvm/Support/TargetOpcodes.def +++ b/llvm/include/llvm/Support/TargetOpcodes.def @@ -684,6 +684,12 @@ HANDLE_TARGET_OPCODE(G_SITOFP) /// Generic unsigned-int to float conversion HANDLE_TARGET_OPCODE(G_UITOFP) +/// Generic saturating float to signed-int conversion +HANDLE_TARGET_OPCODE(G_FPTOSI_SAT) + +/// Generic saturating float to unsigned-int conversion +HANDLE_TARGET_OPCODE(G_FPTOUI_SAT) + /// Generic FP absolute value. HANDLE_TARGET_OPCODE(G_FABS) diff --git a/llvm/include/llvm/TableGen/Record.h b/llvm/include/llvm/TableGen/Record.h index dc86f28..c9e01e3 100644 --- a/llvm/include/llvm/TableGen/Record.h +++ b/llvm/include/llvm/TableGen/Record.h @@ -1912,6 +1912,9 @@ public: /// vector of records, throwing an exception if the field does not exist or /// if the value is not the right type. std::vector<Record*> getValueAsListOfDefs(StringRef FieldName) const; + // Temporary function to help staged migration to const Record pointers. 
+ std::vector<const Record *> + getValueAsListOfConstDefs(StringRef FieldName) const; /// This method looks up the specified field and returns its value as a /// vector of integers, throwing an exception if the field does not exist or diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td index f4934af..a55d9d3 100644 --- a/llvm/include/llvm/Target/GenericOpcodes.td +++ b/llvm/include/llvm/Target/GenericOpcodes.td @@ -769,6 +769,18 @@ def G_UITOFP : GenericInstruction { let hasSideEffects = false; } +def G_FPTOSI_SAT : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type1:$src); + let hasSideEffects = false; +} + +def G_FPTOUI_SAT : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type1:$src); + let hasSideEffects = false; +} + def G_FABS : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src); diff --git a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td index 9344487..d9121cf 100644 --- a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td +++ b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td @@ -98,6 +98,8 @@ def : GINodeEquiv<G_FPTOSI, fp_to_sint>; def : GINodeEquiv<G_FPTOUI, fp_to_uint>; def : GINodeEquiv<G_SITOFP, sint_to_fp>; def : GINodeEquiv<G_UITOFP, uint_to_fp>; +def : GINodeEquiv<G_FPTOSI_SAT, fp_to_sint_sat_gi>; +def : GINodeEquiv<G_FPTOUI_SAT, fp_to_uint_sat_gi>; def : GINodeEquiv<G_FADD, fadd>; def : GINodeEquiv<G_FSUB, fsub>; def : GINodeEquiv<G_FMA, fma>; diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td index 759fd78..adf8a75 100644 --- a/llvm/include/llvm/Target/TargetSelectionDAG.td +++ b/llvm/include/llvm/Target/TargetSelectionDAG.td @@ -569,6 +569,8 @@ def fp_to_sint : SDNode<"ISD::FP_TO_SINT" , SDTFPToIntOp>; def fp_to_uint : SDNode<"ISD::FP_TO_UINT" , SDTFPToIntOp>; def fp_to_sint_sat : SDNode<"ISD::FP_TO_SINT_SAT" , SDTFPToIntSatOp>; def fp_to_uint_sat : SDNode<"ISD::FP_TO_UINT_SAT" , SDTFPToIntSatOp>; +def fp_to_sint_sat_gi : SDNode<"ISD::FP_TO_SINT_SAT" , SDTFPToIntOp>; +def fp_to_uint_sat_gi : SDNode<"ISD::FP_TO_UINT_SAT" , SDTFPToIntOp>; def f16_to_fp : SDNode<"ISD::FP16_TO_FP" , SDTIntToFPOp>; def fp_to_f16 : SDNode<"ISD::FP_TO_FP16" , SDTFPToIntOp>; def bf16_to_fp : SDNode<"ISD::BF16_TO_FP" , SDTIntToFPOp>; diff --git a/llvm/include/llvm/Transforms/Instrumentation/CFGMST.h b/llvm/include/llvm/Transforms/Instrumentation/CFGMST.h index 682ae87..35b3d61 100644 --- a/llvm/include/llvm/Transforms/Instrumentation/CFGMST.h +++ b/llvm/include/llvm/Transforms/Instrumentation/CFGMST.h @@ -280,13 +280,13 @@ public: std::tie(Iter, Inserted) = BBInfos.insert(std::make_pair(Src, nullptr)); if (Inserted) { // Newly inserted, update the real info. - Iter->second = std::move(std::make_unique<BBInfo>(Index)); + Iter->second = std::make_unique<BBInfo>(Index); Index++; } std::tie(Iter, Inserted) = BBInfos.insert(std::make_pair(Dest, nullptr)); if (Inserted) // Newly inserted, update the real info. 
- Iter->second = std::move(std::make_unique<BBInfo>(Index)); + Iter->second = std::make_unique<BBInfo>(Index); AllEdges.emplace_back(new Edge(Src, Dest, W)); return *AllEdges.back(); } diff --git a/llvm/include/llvm/Transforms/Instrumentation/GCOVProfiler.h b/llvm/include/llvm/Transforms/Instrumentation/GCOVProfiler.h index e5b4520..f933332 100644 --- a/llvm/include/llvm/Transforms/Instrumentation/GCOVProfiler.h +++ b/llvm/include/llvm/Transforms/Instrumentation/GCOVProfiler.h @@ -13,7 +13,7 @@ #define LLVM_TRANSFORMS_INSTRUMENTATION_GCOVPROFILER_H #include "llvm/IR/PassManager.h" -#include "llvm/Transforms/Instrumentation.h" +#include "llvm/Transforms/Utils/Instrumentation.h" namespace llvm { /// The gcov-style instrumentation pass diff --git a/llvm/include/llvm/Transforms/Instrumentation/InstrProfiling.h b/llvm/include/llvm/Transforms/Instrumentation/InstrProfiling.h index 0dd37c9..2042700 100644 --- a/llvm/include/llvm/Transforms/Instrumentation/InstrProfiling.h +++ b/llvm/include/llvm/Transforms/Instrumentation/InstrProfiling.h @@ -14,7 +14,7 @@ #define LLVM_TRANSFORMS_INSTRUMENTATION_INSTRPROFILING_H #include "llvm/IR/PassManager.h" -#include "llvm/Transforms/Instrumentation.h" +#include "llvm/Transforms/Utils/Instrumentation.h" namespace llvm { diff --git a/llvm/include/llvm/Transforms/Instrumentation/SanitizerBinaryMetadata.h b/llvm/include/llvm/Transforms/Instrumentation/SanitizerBinaryMetadata.h index 800a1d5..9efa231 100644 --- a/llvm/include/llvm/Transforms/Instrumentation/SanitizerBinaryMetadata.h +++ b/llvm/include/llvm/Transforms/Instrumentation/SanitizerBinaryMetadata.h @@ -16,7 +16,7 @@ #include "llvm/IR/Function.h" #include "llvm/IR/Module.h" #include "llvm/IR/PassManager.h" -#include "llvm/Transforms/Instrumentation.h" +#include "llvm/Transforms/Utils/Instrumentation.h" namespace llvm { diff --git a/llvm/include/llvm/Transforms/Instrumentation/SanitizerCoverage.h b/llvm/include/llvm/Transforms/Instrumentation/SanitizerCoverage.h index 89ab4fd..a49a8cb 100644 --- a/llvm/include/llvm/Transforms/Instrumentation/SanitizerCoverage.h +++ b/llvm/include/llvm/Transforms/Instrumentation/SanitizerCoverage.h @@ -18,7 +18,7 @@ #include "llvm/IR/PassManager.h" #include "llvm/Support/SpecialCaseList.h" #include "llvm/Support/VirtualFileSystem.h" -#include "llvm/Transforms/Instrumentation.h" +#include "llvm/Transforms/Utils/Instrumentation.h" namespace llvm { class Module; diff --git a/llvm/include/llvm/Transforms/Instrumentation.h b/llvm/include/llvm/Transforms/Utils/Instrumentation.h index 1a4824a..1a4824a 100644 --- a/llvm/include/llvm/Transforms/Instrumentation.h +++ b/llvm/include/llvm/Transforms/Utils/Instrumentation.h diff --git a/llvm/include/llvm/Transforms/Utils/SCCPSolver.h b/llvm/include/llvm/Transforms/Utils/SCCPSolver.h index 61a500b..696f39c 100644 --- a/llvm/include/llvm/Transforms/Utils/SCCPSolver.h +++ b/llvm/include/llvm/Transforms/Utils/SCCPSolver.h @@ -104,6 +104,8 @@ public: /// argument-tracked functions. bool isArgumentTrackedFunction(Function *F); + const SmallPtrSetImpl<Function *> &getArgumentTrackedFunctions() const; + /// Solve - Solve for constants and executable blocks. 
void solve(); @@ -191,6 +193,7 @@ public: BasicBlock *&NewUnreachableBB) const; void inferReturnAttributes() const; + void inferArgAttributes() const; bool tryToReplaceWithConstant(Value *V); diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index 7fbefd0..5710bda 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -2340,6 +2340,14 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, MachineInstr::copyFlagsFromInstruction(CI)); return true; } + case Intrinsic::fptosi_sat: + MIRBuilder.buildFPTOSI_SAT(getOrCreateVReg(CI), + getOrCreateVReg(*CI.getArgOperand(0))); + return true; + case Intrinsic::fptoui_sat: + MIRBuilder.buildFPTOUI_SAT(getOrCreateVReg(CI), + getOrCreateVReg(*CI.getArgOperand(0))); + return true; case Intrinsic::memcpy_inline: return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE); case Intrinsic::memcpy: @@ -3528,11 +3536,13 @@ bool IRTranslator::translate(const Constant &C, Register Reg) { Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator()); EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc); } else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) { - if (!isa<FixedVectorType>(CAZ->getType())) - return false; + Constant &Elt = *CAZ->getElementValue(0u); + if (isa<ScalableVectorType>(CAZ->getType())) { + EntryBuilder->buildSplatVector(Reg, getOrCreateVReg(Elt)); + return true; + } // Return the scalar if it is a <1 x Ty> vector. unsigned NumElts = CAZ->getElementCount().getFixedValue(); - Constant &Elt = *CAZ->getElementValue(0u); if (NumElts == 1) return translateCopy(C, Elt, *EntryBuilder); // All elements are zero so we can just use the first one. 
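For reference while reading the GlobalISel changes that follow: a minimal C++ sketch of the saturating-conversion semantics that G_FPTOSI_SAT carries (the helper name is invented; the i16-from-float case is shown). NaN maps to zero and out-of-range values clamp to the integer bounds, which is the behavior the new lowering reproduces at the MIR level.

#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

// Sketch of llvm.fptosi.sat.i16.f32 semantics: round toward zero, clamp
// out-of-range inputs to the i16 min/max, and map NaN to 0.
static int16_t fptosi_sat_i16(float X) {
  if (std::isnan(X))
    return 0;                                   // NaN saturates to zero.
  if (X < -32768.0f)
    return std::numeric_limits<int16_t>::min(); // Saturate low.
  if (X > 32767.0f)
    return std::numeric_limits<int16_t>::max(); // Saturate high.
  return static_cast<int16_t>(X);               // In range: truncate.
}

int main() {
  assert(fptosi_sat_i16(1e9f) == 32767);   // Clamps high.
  assert(fptosi_sat_i16(-1e9f) == -32768); // Clamps low.
  assert(fptosi_sat_i16(NAN) == 0);        // NaN -> 0.
  assert(fptosi_sat_i16(-1.9f) == -1);     // Truncates toward zero.
  return 0;
}

The lowerFPTOINT_SAT implementation below follows the same shape: when the integer bounds are exactly representable in the source float type it clamps with min/max and converts, and otherwise it converts first and patches up the out-of-range and NaN cases with compare-and-select.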
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp index 3640b77..01e47bd 100644 --- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -1880,6 +1880,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI, } case TargetOpcode::G_FPTOUI: case TargetOpcode::G_FPTOSI: + case TargetOpcode::G_FPTOUI_SAT: + case TargetOpcode::G_FPTOSI_SAT: return narrowScalarFPTOI(MI, TypeIdx, NarrowTy); case TargetOpcode::G_FPEXT: if (TypeIdx != 0) @@ -2874,6 +2876,47 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) { Observer.changedInstr(MI); return Legalized; + case TargetOpcode::G_FPTOSI_SAT: + case TargetOpcode::G_FPTOUI_SAT: + Observer.changingInstr(MI); + + if (TypeIdx == 0) { + Register OldDst = MI.getOperand(0).getReg(); + LLT Ty = MRI.getType(OldDst); + Register ExtReg = MRI.createGenericVirtualRegister(WideTy); + Register NewDst; + MI.getOperand(0).setReg(ExtReg); + uint64_t ShortBits = Ty.getScalarSizeInBits(); + uint64_t WideBits = WideTy.getScalarSizeInBits(); + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + if (Opcode == TargetOpcode::G_FPTOSI_SAT) { + // z = i16 fptosi_sat(a) + // -> + // x = i32 fptosi_sat(a) + // y = smin(x, 32767) + // z = smax(y, -32768) + auto MaxVal = MIRBuilder.buildConstant( + WideTy, APInt::getSignedMaxValue(ShortBits).sext(WideBits)); + auto MinVal = MIRBuilder.buildConstant( + WideTy, APInt::getSignedMinValue(ShortBits).sext(WideBits)); + Register MidReg = + MIRBuilder.buildSMin(WideTy, ExtReg, MaxVal).getReg(0); + NewDst = MIRBuilder.buildSMax(WideTy, MidReg, MinVal).getReg(0); + } else { + // z = i16 fptoui_sat(a) + // -> + // x = i32 fptoui_sat(a) + // y = smin(x, 65535) + auto MaxVal = MIRBuilder.buildConstant( + WideTy, APInt::getAllOnes(ShortBits).zext(WideBits)); + NewDst = MIRBuilder.buildUMin(WideTy, ExtReg, MaxVal).getReg(0); + } + MIRBuilder.buildTrunc(OldDst, NewDst); + } else + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_FPEXT); + + Observer.changedInstr(MI); + return Legalized; case TargetOpcode::G_LOAD: case TargetOpcode::G_SEXTLOAD: case TargetOpcode::G_ZEXTLOAD: @@ -4170,6 +4213,9 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) { return lowerFPTOUI(MI); case G_FPTOSI: return lowerFPTOSI(MI); + case G_FPTOUI_SAT: + case G_FPTOSI_SAT: + return lowerFPTOINT_SAT(MI); case G_FPTRUNC: return lowerFPTRUNC(MI); case G_FPOWI: @@ -4986,6 +5032,8 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx, case G_UITOFP: case G_FPTOSI: case G_FPTOUI: + case G_FPTOSI_SAT: + case G_FPTOUI_SAT: case G_INTTOPTR: case G_PTRTOINT: case G_ADDRSPACE_CAST: @@ -5777,6 +5825,8 @@ LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx, case TargetOpcode::G_FPEXT: case TargetOpcode::G_FPTOSI: case TargetOpcode::G_FPTOUI: + case TargetOpcode::G_FPTOSI_SAT: + case TargetOpcode::G_FPTOUI_SAT: case TargetOpcode::G_SITOFP: case TargetOpcode::G_UITOFP: { Observer.changingInstr(MI); @@ -7285,6 +7335,106 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerFPTOSI(MachineInstr &MI) { return Legalized; } +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerFPTOINT_SAT(MachineInstr &MI) { + auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs(); + + bool IsSigned = MI.getOpcode() == TargetOpcode::G_FPTOSI_SAT; + unsigned SatWidth = DstTy.getScalarSizeInBits(); + + // Determine minimum and maximum integer values and 
their corresponding + // floating-point values. + APInt MinInt, MaxInt; + if (IsSigned) { + MinInt = APInt::getSignedMinValue(SatWidth); + MaxInt = APInt::getSignedMaxValue(SatWidth); + } else { + MinInt = APInt::getMinValue(SatWidth); + MaxInt = APInt::getMaxValue(SatWidth); + } + + const fltSemantics &Semantics = getFltSemanticForLLT(SrcTy.getScalarType()); + APFloat MinFloat(Semantics); + APFloat MaxFloat(Semantics); + + APFloat::opStatus MinStatus = + MinFloat.convertFromAPInt(MinInt, IsSigned, APFloat::rmTowardZero); + APFloat::opStatus MaxStatus = + MaxFloat.convertFromAPInt(MaxInt, IsSigned, APFloat::rmTowardZero); + bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact) && + !(MaxStatus & APFloat::opStatus::opInexact); + + // If the integer bounds are exactly representable as floats, emit a + // min+max+fptoi sequence. Otherwise we have to use a sequence of comparisons + // and selects. + if (AreExactFloatBounds) { + // Clamp Src by MinFloat from below. If Src is NaN the result is MinFloat. + auto MaxC = MIRBuilder.buildFConstant(SrcTy, MinFloat); + auto MaxP = MIRBuilder.buildFCmp(CmpInst::FCMP_ULT, + SrcTy.changeElementSize(1), Src, MaxC); + auto Max = MIRBuilder.buildSelect(SrcTy, MaxP, Src, MaxC); + // Clamp by MaxFloat from above. NaN cannot occur. + auto MinC = MIRBuilder.buildFConstant(SrcTy, MaxFloat); + auto MinP = + MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, SrcTy.changeElementSize(1), Max, + MinC, MachineInstr::FmNoNans); + auto Min = + MIRBuilder.buildSelect(SrcTy, MinP, Max, MinC, MachineInstr::FmNoNans); + // Convert clamped value to integer. In the unsigned case we're done, + // because we mapped NaN to MinFloat, which will cast to zero. + if (!IsSigned) { + MIRBuilder.buildFPTOUI(Dst, Min); + MI.eraseFromParent(); + return Legalized; + } + + // Otherwise, select 0 if Src is NaN. + auto FpToInt = MIRBuilder.buildFPTOSI(DstTy, Min); + auto IsZero = MIRBuilder.buildFCmp(CmpInst::FCMP_UNO, + DstTy.changeElementSize(1), Src, Src); + MIRBuilder.buildSelect(Dst, IsZero, MIRBuilder.buildConstant(DstTy, 0), + FpToInt); + MI.eraseFromParent(); + return Legalized; + } + + // Result of direct conversion. The assumption here is that the operation is + // non-trapping and it's fine to apply it to an out-of-range value if we + // select it away later. + auto FpToInt = IsSigned ? MIRBuilder.buildFPTOSI(DstTy, Src) + : MIRBuilder.buildFPTOUI(DstTy, Src); + + // If Src ULT MinFloat, select MinInt. In particular, this also selects + // MinInt if Src is NaN. + auto ULT = + MIRBuilder.buildFCmp(CmpInst::FCMP_ULT, SrcTy.changeElementSize(1), Src, + MIRBuilder.buildFConstant(SrcTy, MinFloat)); + auto Max = MIRBuilder.buildSelect( + DstTy, ULT, MIRBuilder.buildConstant(DstTy, MinInt), FpToInt); + // If Src OGT MaxFloat, select MaxInt. + auto OGT = + MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, SrcTy.changeElementSize(1), Src, + MIRBuilder.buildFConstant(SrcTy, MaxFloat)); + + // In the unsigned case we are done, because we mapped NaN to MinInt, which + // is already zero. + if (!IsSigned) { + MIRBuilder.buildSelect(Dst, OGT, MIRBuilder.buildConstant(DstTy, MaxInt), + Max); + MI.eraseFromParent(); + return Legalized; + } + + // Otherwise, select 0 if Src is NaN. 
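+ // (This compare-and-select path is reached when a bound is not exactly representable, e.g. s64 from f32, where MaxInt = 2^63 - 1 rounds toward zero to 9223371487098961920.0; the FCMP_UNO of Src against itself below is the NaN test, since only NaN compares unordered with itself.)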
+ auto Min = MIRBuilder.buildSelect( + DstTy, OGT, MIRBuilder.buildConstant(DstTy, MaxInt), Max); + auto IsZero = MIRBuilder.buildFCmp(CmpInst::FCMP_UNO, + DstTy.changeElementSize(1), Src, Src); + MIRBuilder.buildSelect(Dst, IsZero, MIRBuilder.buildConstant(DstTy, 0), Min); + MI.eraseFromParent(); + return Legalized; +} + // f64 -> f16 conversion using round-to-nearest-even rounding mode. LegalizerHelper::LegalizeResult LegalizerHelper::lowerFPTRUNC_F64_TO_F16(MachineInstr &MI) { diff --git a/llvm/lib/CodeGen/InitUndef.cpp b/llvm/lib/CodeGen/InitUndef.cpp index a89c823..911e8bb 100644 --- a/llvm/lib/CodeGen/InitUndef.cpp +++ b/llvm/lib/CodeGen/InitUndef.cpp @@ -120,8 +120,6 @@ bool InitUndef::handleReg(MachineInstr *MI) { continue; if (!UseMO.getReg().isVirtual()) continue; - if (!TRI->doesRegClassHavePseudoInitUndef(MRI->getRegClass(UseMO.getReg()))) - continue; if (UseMO.isUndef() || findImplictDefMIFromReg(UseMO.getReg(), MRI)) Changed |= fixupIllOperand(MI, UseMO); @@ -140,8 +138,6 @@ bool InitUndef::handleSubReg(MachineFunction &MF, MachineInstr &MI, continue; if (UseMO.isTied()) continue; - if (!TRI->doesRegClassHavePseudoInitUndef(MRI->getRegClass(UseMO.getReg()))) - continue; Register Reg = UseMO.getReg(); if (NewRegs.count(Reg)) @@ -246,13 +242,9 @@ bool InitUndef::processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB, bool InitUndef::runOnMachineFunction(MachineFunction &MF) { ST = &MF.getSubtarget(); - // supportsInitUndef is implemented to reflect if an architecture has support - // for the InitUndef pass. Support comes from having the relevant Pseudo - // instructions that can be used to initialize the register. The function - // returns false by default so requires an implementation per architecture. - // Support can be added by overriding the function in a way that best fits - // the architecture. - if (!ST->supportsInitUndef()) + // The pass is only needed if early-clobber defs and undef ops cannot be + // allocated to the same register. + if (!ST->requiresDisjointEarlyClobberAndUndef()) return false; MRI = &MF.getRegInfo(); diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index fe8ae5c..cd39cb0 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -3764,6 +3764,79 @@ SDValue DAGCombiner::foldSubToUSubSat(EVT DstVT, SDNode *N, const SDLoc &DL) { return SDValue(); } +// Refinement of DAG/Type Legalisation (promotion) when CTLZ is used for +// counting leading ones. Broadly, it replaces the subtraction with a left +// shift.
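+// The left shift moves Src's bits to the top of the wide value, and the following 'not' turns the shifted-in zero bits into ones, so the leading-one count of the narrow Src can be read off the top directly and, for a nonzero BitWidthDiff, the CTLZ operand can never be zero, which is what makes CTLZ_ZERO_UNDEF safe.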
+// +// * DAG Legalisation Pattern: +// +// (sub (ctlz (zeroextend (not Src))) +// BitWidthDiff) +// +// if BitWidthDiff == BitWidth(Node) - BitWidth(Src) +// --> +// +// (ctlz_zero_undef (not (shl (anyextend Src) +// BitWidthDiff))) +// +// * Type Legalisation Pattern: +// +// (sub (ctlz (and (xor Src XorMask) +// AndMask)) +// BitWidthDiff) +// +// if AndMask has only trailing ones +// and MaskBitWidth(AndMask) == BitWidth(Node) - BitWidthDiff +// and XorMask has more trailing ones than AndMask +// --> +// +// (ctlz_zero_undef (not (shl Src BitWidthDiff))) +template <class MatchContextClass> +static SDValue foldSubCtlzNot(SDNode *N, SelectionDAG &DAG) { + const SDLoc DL(N); + SDValue N0 = N->getOperand(0); + EVT VT = N0.getValueType(); + unsigned BitWidth = VT.getScalarSizeInBits(); + + MatchContextClass Matcher(DAG, DAG.getTargetLoweringInfo(), N); + + APInt AndMask; + APInt XorMask; + APInt BitWidthDiff; + + SDValue CtlzOp; + SDValue Src; + + if (!sd_context_match( + N, Matcher, m_Sub(m_Ctlz(m_Value(CtlzOp)), m_ConstInt(BitWidthDiff)))) + return SDValue(); + + if (sd_context_match(CtlzOp, Matcher, m_ZExt(m_Not(m_Value(Src))))) { + // DAG Legalisation Pattern: + // (sub (ctlz (zero_extend (not Op))) BitWidthDiff) + if ((BitWidth - Src.getValueType().getScalarSizeInBits()) != BitWidthDiff) + return SDValue(); + + Src = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Src); + } else if (sd_context_match(CtlzOp, Matcher, + m_And(m_Xor(m_Value(Src), m_ConstInt(XorMask)), + m_ConstInt(AndMask)))) { + // Type Legalisation Pattern: + // (sub (ctlz (and (xor Op XorMask) AndMask)) BitWidthDiff) + unsigned AndMaskWidth = BitWidth - BitWidthDiff.getZExtValue(); + if (!(AndMask.isMask(AndMaskWidth) && XorMask.countr_one() >= AndMaskWidth)) + return SDValue(); + } else + return SDValue(); + + SDValue ShiftConst = DAG.getShiftAmountConstant(BitWidthDiff, VT, DL); + SDValue LShift = Matcher.getNode(ISD::SHL, DL, VT, Src, ShiftConst); + SDValue Not = + Matcher.getNode(ISD::XOR, DL, VT, LShift, DAG.getAllOnesConstant(DL, VT)); + + return Matcher.getNode(ISD::CTLZ_ZERO_UNDEF, DL, VT, Not); +} + // Since it may not be valid to emit a fold to zero for vector initializers // check if we can before folding. static SDValue tryFoldToZero(const SDLoc &DL, const TargetLowering &TLI, EVT VT, @@ -3788,6 +3861,9 @@ SDValue DAGCombiner::visitSUB(SDNode *N) { return N; }; + if (SDValue V = foldSubCtlzNot<EmptyMatchContext>(N, DAG)) + return V; + // fold (sub x, x) -> 0 // FIXME: Refactor this and xor and other similar operations together. if (PeekThroughFreeze(N0) == PeekThroughFreeze(N1)) @@ -4897,6 +4973,12 @@ SDValue DAGCombiner::visitUDIV(SDNode *N) { if (SDValue DivRem = useDivRem(N)) return DivRem; + // Simplify the operands using demanded-bits information. + // We don't have demanded bits support for UDIV so this just enables constant + // folding based on known bits. + if (SimplifyDemandedBits(SDValue(N, 0))) + return SDValue(N, 0); + return SDValue(); } @@ -16052,8 +16134,8 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) { // Floating-point multiply-add without intermediate rounding. bool HasFMA = - TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT) && - (!LegalOperations || matcher.isOperationLegalOrCustom(ISD::FMA, VT)); + (!LegalOperations || matcher.isOperationLegalOrCustom(ISD::FMA, VT)) && + TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT); // No valid opcode, do not combine.
if (!HasFMAD && !HasFMA) @@ -16289,8 +16371,8 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) { // Floating-point multiply-add without intermediate rounding. bool HasFMA = - TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT) && - (!LegalOperations || matcher.isOperationLegalOrCustom(ISD::FMA, VT)); + (!LegalOperations || matcher.isOperationLegalOrCustom(ISD::FMA, VT)) && + TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT); // No valid opcode, do not combine. if (!HasFMAD && !HasFMA) @@ -16620,8 +16702,8 @@ SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) { // Floating-point multiply-add without intermediate rounding. bool HasFMA = isContractableFMUL(Options, SDValue(N, 0)) && - TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT) && - (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT)); + (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT)) && + TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT); // Floating-point multiply-add with intermediate rounding. This can result // in a less precise result due to the changed rounding order. @@ -26983,6 +27065,8 @@ SDValue DAGCombiner::visitVPOp(SDNode *N) { return visitVP_SELECT(N); case ISD::VP_MUL: return visitMUL<VPMatchContext>(N); + case ISD::VP_SUB: + return foldSubCtlzNot<VPMatchContext>(N, DAG); default: break; } diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 29505f4..44ec6f7 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -6799,14 +6799,17 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, // Constant fold the scalar operands. SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags); - // Legalize the (integer) scalar constant if necessary. - if (LegalSVT != SVT) - ScalarResult = getNode(ExtendCode, DL, LegalSVT, ScalarResult); - // Scalar folding only succeeded if the result is a constant or UNDEF. if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && ScalarResult.getOpcode() != ISD::ConstantFP) return SDValue(); + + // Legalize the (integer) scalar constant if necessary. We only do + // this once we know the folding succeeded, since otherwise we would + // get a node with illegal type which has a user. 
+ if (LegalSVT != SVT) + ScalarResult = getNode(ExtendCode, DL, LegalSVT, ScalarResult); + ScalarResults.push_back(ScalarResult); } diff --git a/llvm/lib/CodeGen/StackSlotColoring.cpp b/llvm/lib/CodeGen/StackSlotColoring.cpp index df06577..2b43e90 100644 --- a/llvm/lib/CodeGen/StackSlotColoring.cpp +++ b/llvm/lib/CodeGen/StackSlotColoring.cpp @@ -472,8 +472,8 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) { MachineBasicBlock::iterator NextMI = std::next(I); MachineBasicBlock::iterator ProbableLoadMI = I; - unsigned LoadReg = 0; - unsigned StoreReg = 0; + Register LoadReg; + Register StoreReg; unsigned LoadSize = 0; unsigned StoreSize = 0; if (!(LoadReg = TII->isLoadFromStackSlot(*I, FirstSS, LoadSize))) diff --git a/llvm/lib/CodeGen/VirtRegMap.cpp b/llvm/lib/CodeGen/VirtRegMap.cpp index 4acc4f8..a548bf6 100644 --- a/llvm/lib/CodeGen/VirtRegMap.cpp +++ b/llvm/lib/CodeGen/VirtRegMap.cpp @@ -84,12 +84,12 @@ void VirtRegMap::grow() { void VirtRegMap::assignVirt2Phys(Register virtReg, MCPhysReg physReg) { assert(virtReg.isVirtual() && Register::isPhysicalRegister(physReg)); - assert(Virt2PhysMap[virtReg.id()] == NO_PHYS_REG && + assert(!Virt2PhysMap[virtReg] && "attempt to assign physical register to already mapped " "virtual register"); assert(!getRegInfo().isReserved(physReg) && "Attempt to map virtReg to a reserved physReg"); - Virt2PhysMap[virtReg.id()] = physReg; + Virt2PhysMap[virtReg] = physReg; } unsigned VirtRegMap::createSpillSlot(const TargetRegisterClass *RC) { @@ -126,27 +126,27 @@ bool VirtRegMap::hasKnownPreference(Register VirtReg) const { int VirtRegMap::assignVirt2StackSlot(Register virtReg) { assert(virtReg.isVirtual()); - assert(Virt2StackSlotMap[virtReg.id()] == NO_STACK_SLOT && + assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT && "attempt to assign stack slot to already spilled register"); const TargetRegisterClass* RC = MF->getRegInfo().getRegClass(virtReg); - return Virt2StackSlotMap[virtReg.id()] = createSpillSlot(RC); + return Virt2StackSlotMap[virtReg] = createSpillSlot(RC); } void VirtRegMap::assignVirt2StackSlot(Register virtReg, int SS) { assert(virtReg.isVirtual()); - assert(Virt2StackSlotMap[virtReg.id()] == NO_STACK_SLOT && + assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT && "attempt to assign stack slot to already spilled register"); assert((SS >= 0 || (SS >= MF->getFrameInfo().getObjectIndexBegin())) && "illegal fixed frame index"); - Virt2StackSlotMap[virtReg.id()] = SS; + Virt2StackSlotMap[virtReg] = SS; } void VirtRegMap::print(raw_ostream &OS, const Module*) const { OS << "********** REGISTER MAP **********\n"; for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) { Register Reg = Register::index2VirtReg(i); - if (Virt2PhysMap[Reg] != (unsigned)VirtRegMap::NO_PHYS_REG) { + if (Virt2PhysMap[Reg]) { OS << '[' << printReg(Reg, TRI) << " -> " << printReg(Virt2PhysMap[Reg], TRI) << "] " << TRI->getRegClassName(MRI->getRegClass(Reg)) << "\n"; @@ -347,8 +347,8 @@ void VirtRegRewriter::addMBBLiveIns() { continue; // This is a virtual register that is live across basic blocks. Its // assigned PhysReg must be marked as live-in to those blocks. - Register PhysReg = VRM->getPhys(VirtReg); - if (PhysReg == VirtRegMap::NO_PHYS_REG) { + MCRegister PhysReg = VRM->getPhys(VirtReg); + if (!PhysReg) { // There may be no physical register assigned if only some register // classes were already allocated. 
assert(!ClearVirtRegs && "Unmapped virtual register"); @@ -551,7 +551,7 @@ void VirtRegRewriter::rewrite() { continue; Register VirtReg = MO.getReg(); MCRegister PhysReg = VRM->getPhys(VirtReg); - if (PhysReg == VirtRegMap::NO_PHYS_REG) + if (!PhysReg) continue; assert(Register(PhysReg).isPhysical()); diff --git a/llvm/lib/DWARFLinker/Parallel/OutputSections.h b/llvm/lib/DWARFLinker/Parallel/OutputSections.h index d2e4622..da47f53 100644 --- a/llvm/lib/DWARFLinker/Parallel/OutputSections.h +++ b/llvm/lib/DWARFLinker/Parallel/OutputSections.h @@ -371,16 +371,11 @@ public: /// If descriptor does not exist then creates it. SectionDescriptor & getOrCreateSectionDescriptor(DebugSectionKind SectionKind) { - SectionsSetTy::iterator It = SectionDescriptors.find(SectionKind); - - if (It == SectionDescriptors.end()) { - SectionDescriptor *Section = - new SectionDescriptor(SectionKind, GlobalData, Format, Endianness); - auto Result = SectionDescriptors.try_emplace(SectionKind, Section); - assert(Result.second); + auto [It, Inserted] = SectionDescriptors.try_emplace(SectionKind); - It = Result.first; - } + if (Inserted) + It->second = std::make_shared<SectionDescriptor>(SectionKind, GlobalData, + Format, Endianness); return *It->second; } diff --git a/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp index 0d7a51b..4cce4a7 100644 --- a/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp +++ b/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp @@ -219,7 +219,7 @@ void MCJIT::generateCodeForModule(Module *M) { std::string Buf; raw_string_ostream OS(Buf); logAllUnhandledErrors(LoadedObject.takeError(), OS); - report_fatal_error(Twine(OS.str())); + report_fatal_error(Twine(Buf)); } std::unique_ptr<RuntimeDyld::LoadedObjectInfo> L = Dyld.loadObject(*LoadedObject.get()); diff --git a/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderPerf.cpp b/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderPerf.cpp index f7852b0..f40d93f 100644 --- a/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderPerf.cpp +++ b/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderPerf.cpp @@ -346,11 +346,11 @@ static Error registerJITLoaderPerfStartImpl() { // Need to open ourselves, because we need to hand the FD to OpenMarker() and // raw_fd_ostream doesn't expose the FD. using sys::fs::openFileForWrite; - if (auto EC = openFileForReadWrite(FilenameBuf.str(), Tentative.DumpFd, + if (auto EC = openFileForReadWrite(Filename, Tentative.DumpFd, sys::fs::CD_CreateNew, sys::fs::OF_None)) { std::string ErrStr; raw_string_ostream ErrStream(ErrStr); - ErrStream << "could not open JIT dump file " << FilenameBuf.str() << ": " + ErrStream << "could not open JIT dump file " << Filename << ": " << EC.message() << "\n"; return make_error<StringError>(std::move(ErrStr), inconvertibleErrorCode()); } diff --git a/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp b/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp index cf9ed7db..4d14a60 100644 --- a/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp +++ b/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp @@ -199,10 +199,9 @@ PerfJITEventListener::PerfJITEventListener() // Need to open ourselves, because we need to hand the FD to OpenMarker() and // raw_fd_ostream doesn't expose the FD. 
using sys::fs::openFileForWrite; - if (auto EC = - openFileForReadWrite(FilenameBuf.str(), DumpFd, - sys::fs::CD_CreateNew, sys::fs::OF_None)) { - errs() << "could not open JIT dump file " << FilenameBuf.str() << ": " + if (auto EC = openFileForReadWrite(Filename, DumpFd, sys::fs::CD_CreateNew, + sys::fs::OF_None)) { + errs() << "could not open JIT dump file " << Filename << ": " << EC.message() << "\n"; return; } diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp index b98d455..d3d3735 100644 --- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp +++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp @@ -304,10 +304,10 @@ private: if (auto E = TI.takeError()) { errs() << "Error obtaining instruction printer: " << toString(std::move(E)) << "\n"; - return std::make_pair(EvalResult(ErrMsgStream.str()), ""); + return; } Inst.dump_pretty(ErrMsgStream, TI->InstPrinter.get()); - return std::make_pair(EvalResult(ErrMsgStream.str()), ""); + return; }; if (OpIdx >= Inst.getNumOperands()) { @@ -319,7 +319,8 @@ private: << format("%i", Inst.getNumOperands()) << " operands.\nInstruction is:\n "; - return printInst(Symbol, Inst, ErrMsgStream); + printInst(Symbol, Inst, ErrMsgStream); + return {EvalResult(std::move(ErrMsg)), ""}; } const MCOperand &Op = Inst.getOperand(OpIdx); @@ -329,7 +330,8 @@ private: ErrMsgStream << "Operand '" << format("%i", OpIdx) << "' of instruction '" << Symbol << "' is not an immediate.\nInstruction is:\n "; - return printInst(Symbol, Inst, ErrMsgStream); + printInst(Symbol, Inst, ErrMsgStream); + return {EvalResult(std::move(ErrMsg)), ""}; } return std::make_pair(EvalResult(Op.getImm()), RemainingExpr); diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp index 736d9a3..b4e088d 100644 --- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp +++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp @@ -1257,7 +1257,7 @@ RuntimeDyldELF::processRelocationRef( std::string Buf; raw_string_ostream OS(Buf); logAllUnhandledErrors(SymTypeOrErr.takeError(), OS); - report_fatal_error(Twine(OS.str())); + report_fatal_error(Twine(Buf)); } SymType = *SymTypeOrErr; } @@ -1277,7 +1277,7 @@ RuntimeDyldELF::processRelocationRef( std::string Buf; raw_string_ostream OS(Buf); logAllUnhandledErrors(SectionOrErr.takeError(), OS); - report_fatal_error(Twine(OS.str())); + report_fatal_error(Twine(Buf)); } section_iterator si = *SectionOrErr; if (si == Obj.section_end()) diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h index c079d88..19e4225 100644 --- a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h +++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h @@ -30,7 +30,7 @@ static bool isThumbFunc(object::symbol_iterator Symbol, std::string Buf; raw_string_ostream OS(Buf); logAllUnhandledErrors(SymTypeOrErr.takeError(), OS); - report_fatal_error(Twine(OS.str())); + report_fatal_error(Twine(Buf)); } if (*SymTypeOrErr != object::SymbolRef::ST_Function) diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp index 82ff4e1..8767c29 100644 --- a/llvm/lib/IR/Function.cpp +++ b/llvm/lib/IR/Function.cpp @@ -765,6 +765,11 @@ Attribute Function::getAttributeAtIndex(unsigned i, StringRef Kind) const { return AttributeSets.getAttributeAtIndex(i, Kind); } +bool 
Function::hasAttributeAtIndex(unsigned Idx, + Attribute::AttrKind Kind) const { + return AttributeSets.hasAttributeAtIndex(Idx, Kind); +} + Attribute Function::getFnAttribute(Attribute::AttrKind Kind) const { return AttributeSets.getFnAttr(Kind); } diff --git a/llvm/lib/MCA/HardwareUnits/ResourceManager.cpp b/llvm/lib/MCA/HardwareUnits/ResourceManager.cpp index 8d99695..8203020 100644 --- a/llvm/lib/MCA/HardwareUnits/ResourceManager.cpp +++ b/llvm/lib/MCA/HardwareUnits/ResourceManager.cpp @@ -344,9 +344,90 @@ uint64_t ResourceManager::checkAvailability(const InstrDesc &Desc) const { return BusyResourceMask; } -void ResourceManager::issueInstruction( - const InstrDesc &Desc, - SmallVectorImpl<std::pair<ResourceRef, ReleaseAtCycles>> &Pipes) { +void ResourceManager::issueInstructionImpl( + const InstrDesc &Desc, SmallVectorImpl<ResourceWithCycles> &Pipes) { + + // Step 1. + // - Issue writes to non-group resources. + // - Issue writes to groups with only a single resource unit available. + // - Update reserved groups (if any). + // - Add any remaining resource usage requests to a Worklist. + SmallVector<std::pair<uint64_t, ResourceUsage>, 4> Worklist; + + using ResourceWithUsage = std::pair<uint64_t, ResourceUsage>; + + for (const ResourceWithUsage &R : Desc.Resources) { + const CycleSegment &CS = R.second.CS; + if (!CS.size()) { + releaseResource(R.first); + continue; + } + + assert(CS.begin() == 0 && "Invalid {Start, End} cycles!"); + if (R.second.isReserved()) { + assert((llvm::popcount(R.first) > 1) && "Expected a group!"); + // Mark this group as reserved. + assert(R.second.isReserved()); + reserveResource(R.first); + BusyResources[ResourceRef(R.first, R.first)] += CS.size(); + continue; + } + + const ResourceState &RS = *Resources[getResourceStateIndex(R.first)]; + if (RS.isAResourceGroup() && RS.getNumReadyUnits() > 1) { + Worklist.push_back(R); + continue; + } + + ResourceRef Pipe = selectPipe(R.first); + use(Pipe); + BusyResources[Pipe] += CS.size(); + Pipes.emplace_back(std::make_pair(Pipe, ReleaseAtCycles(CS.size()))); + } + + // Step 2. + // Prioritize writes to groups with fewer available resources. + // NOTE: this algorithm has quadratic complexity in the worst-case scenario. + // On average, this algorithm is expected to perform quite well and always + // converge in very few iterations. That is mainly because instructions rarely + // consume more than two or three resource groups.
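+ // For example, a write that names two overlapping groups, one with a single ready unit and one with three, issues to the single-unit group first; consuming that unit can change the ready count of the other group, which is why the worklist is re-sorted on every iteration.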
+ + while (!Worklist.empty()) { + sort(Worklist, [&](const ResourceWithUsage &Lhs, + const ResourceWithUsage &Rhs) { + const ResourceState &LhsRS = *Resources[getResourceStateIndex(Lhs.first)]; + const ResourceState &RhsRS = *Resources[getResourceStateIndex(Rhs.first)]; + uint64_t LhsReadyUnits = LhsRS.getNumReadyUnits(); + uint64_t RhsReadyUnits = RhsRS.getNumReadyUnits(); + if (LhsReadyUnits == RhsReadyUnits) + return Lhs.first < Rhs.first; + return LhsReadyUnits < RhsReadyUnits; + }); + + SmallVector<ResourceWithUsage, 4> NewWorklist; + + for (unsigned I = 0, E = Worklist.size(); I < E; ++I) { + const auto &Elt = Worklist[I]; + const ResourceState &RS = *Resources[getResourceStateIndex(Elt.first)]; + + if (I == 0 || RS.getNumReadyUnits() == 1) { + ResourceRef Pipe = selectPipe(Elt.first); + use(Pipe); + const CycleSegment &CS = Elt.second.CS; + BusyResources[Pipe] += CS.size(); + Pipes.emplace_back(std::make_pair(Pipe, ReleaseAtCycles(CS.size()))); + continue; + } + + NewWorklist.push_back(Elt); + } + + swap(NewWorklist, Worklist); + }; +} + +void ResourceManager::fastIssueInstruction( + const InstrDesc &Desc, SmallVectorImpl<ResourceWithCycles> &Pipes) { for (const std::pair<uint64_t, ResourceUsage> &R : Desc.Resources) { const CycleSegment &CS = R.second.CS; if (!CS.size()) { diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp index c34f914..0d63ed2 100644 --- a/llvm/lib/Passes/PassBuilder.cpp +++ b/llvm/lib/Passes/PassBuilder.cpp @@ -183,7 +183,6 @@ #include "llvm/Transforms/IPO/StripSymbols.h" #include "llvm/Transforms/IPO/WholeProgramDevirt.h" #include "llvm/Transforms/InstCombine/InstCombine.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Instrumentation/AddressSanitizer.h" #include "llvm/Transforms/Instrumentation/BoundsChecking.h" #include "llvm/Transforms/Instrumentation/CGProfile.h" @@ -299,6 +298,7 @@ #include "llvm/Transforms/Utils/HelloWorld.h" #include "llvm/Transforms/Utils/InjectTLIMappings.h" #include "llvm/Transforms/Utils/InstructionNamer.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include "llvm/Transforms/Utils/LCSSA.h" #include "llvm/Transforms/Utils/LibCallsShrinkWrap.h" #include "llvm/Transforms/Utils/LoopSimplify.h" diff --git a/llvm/lib/TableGen/Record.cpp b/llvm/lib/TableGen/Record.cpp index 26f45d5..97ae0b0 100644 --- a/llvm/lib/TableGen/Record.cpp +++ b/llvm/lib/TableGen/Record.cpp @@ -3027,6 +3027,21 @@ Record::getValueAsListOfDefs(StringRef FieldName) const { return Defs; } +std::vector<const Record *> +Record::getValueAsListOfConstDefs(StringRef FieldName) const { + ListInit *List = getValueAsListInit(FieldName); + std::vector<const Record *> Defs; + for (const Init *I : List->getValues()) { + if (const DefInit *DI = dyn_cast<DefInit>(I)) + Defs.push_back(DI->getDef()); + else + PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + + FieldName + + "' list is not entirely DefInit!"); + } + return Defs; +} + int64_t Record::getValueAsInt(StringRef FieldName) const { const RecordVal *R = getValue(FieldName); if (!R || !R->getValue()) diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td index 9f7ffff..a47de9a 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -4729,7 +4729,7 @@ defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>; defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>; // AArch64's FCVT instructions saturate when out of range. 
-multiclass FPToIntegerSatPats<SDNode to_int_sat, string INST> { +multiclass FPToIntegerSatPats<SDNode to_int_sat, SDNode to_int_sat_gi, string INST> { let Predicates = [HasFullFP16] in { def : Pat<(i32 (to_int_sat f16:$Rn, i32)), (!cast<Instruction>(INST # UWHr) f16:$Rn)>; @@ -4746,6 +4746,21 @@ multiclass FPToIntegerSatPats<SDNode to_int_sat, string INST> { (!cast<Instruction>(INST # UXDr) f64:$Rn)>; let Predicates = [HasFullFP16] in { + def : Pat<(i32 (to_int_sat_gi f16:$Rn)), + (!cast<Instruction>(INST # UWHr) f16:$Rn)>; + def : Pat<(i64 (to_int_sat_gi f16:$Rn)), + (!cast<Instruction>(INST # UXHr) f16:$Rn)>; + } + def : Pat<(i32 (to_int_sat_gi f32:$Rn)), + (!cast<Instruction>(INST # UWSr) f32:$Rn)>; + def : Pat<(i64 (to_int_sat_gi f32:$Rn)), + (!cast<Instruction>(INST # UXSr) f32:$Rn)>; + def : Pat<(i32 (to_int_sat_gi f64:$Rn)), + (!cast<Instruction>(INST # UWDr) f64:$Rn)>; + def : Pat<(i64 (to_int_sat_gi f64:$Rn)), + (!cast<Instruction>(INST # UXDr) f64:$Rn)>; + + let Predicates = [HasFullFP16] in { def : Pat<(i32 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i32:$scale), i32)), (!cast<Instruction>(INST # SWHri) $Rn, $scale)>; def : Pat<(i64 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i64:$scale), i64)), @@ -4759,10 +4774,25 @@ multiclass FPToIntegerSatPats<SDNode to_int_sat, string INST> { (!cast<Instruction>(INST # SWDri) $Rn, $scale)>; def : Pat<(i64 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i64:$scale), i64)), (!cast<Instruction>(INST # SXDri) $Rn, $scale)>; + + let Predicates = [HasFullFP16] in { + def : Pat<(i32 (to_int_sat_gi (fmul f16:$Rn, fixedpoint_f16_i32:$scale))), + (!cast<Instruction>(INST # SWHri) $Rn, $scale)>; + def : Pat<(i64 (to_int_sat_gi (fmul f16:$Rn, fixedpoint_f16_i64:$scale))), + (!cast<Instruction>(INST # SXHri) $Rn, $scale)>; + } + def : Pat<(i32 (to_int_sat_gi (fmul f32:$Rn, fixedpoint_f32_i32:$scale))), + (!cast<Instruction>(INST # SWSri) $Rn, $scale)>; + def : Pat<(i64 (to_int_sat_gi (fmul f32:$Rn, fixedpoint_f32_i64:$scale))), + (!cast<Instruction>(INST # SXSri) $Rn, $scale)>; + def : Pat<(i32 (to_int_sat_gi (fmul f64:$Rn, fixedpoint_f64_i32:$scale))), + (!cast<Instruction>(INST # SWDri) $Rn, $scale)>; + def : Pat<(i64 (to_int_sat_gi (fmul f64:$Rn, fixedpoint_f64_i64:$scale))), + (!cast<Instruction>(INST # SXDri) $Rn, $scale)>; } -defm : FPToIntegerSatPats<fp_to_sint_sat, "FCVTZS">; -defm : FPToIntegerSatPats<fp_to_uint_sat, "FCVTZU">; +defm : FPToIntegerSatPats<fp_to_sint_sat, fp_to_sint_sat_gi, "FCVTZS">; +defm : FPToIntegerSatPats<fp_to_uint_sat, fp_to_uint_sat_gi, "FCVTZU">; multiclass FPToIntegerIntPats<Intrinsic round, string INST> { let Predicates = [HasFullFP16] in { @@ -5308,12 +5338,17 @@ defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", any_fp_to_sint>; defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", any_fp_to_uint>; // AArch64's FCVT instructions saturate when out of range. 
-multiclass SIMDTwoVectorFPToIntSatPats<SDNode to_int_sat, string INST> { +multiclass SIMDTwoVectorFPToIntSatPats<SDNode to_int_sat, SDNode to_int_sat_gi, string INST> { let Predicates = [HasFullFP16] in { def : Pat<(v4i16 (to_int_sat v4f16:$Rn, i16)), (!cast<Instruction>(INST # v4f16) v4f16:$Rn)>; def : Pat<(v8i16 (to_int_sat v8f16:$Rn, i16)), (!cast<Instruction>(INST # v8f16) v8f16:$Rn)>; + + def : Pat<(v4i16 (to_int_sat_gi v4f16:$Rn)), + (!cast<Instruction>(INST # v4f16) v4f16:$Rn)>; + def : Pat<(v8i16 (to_int_sat_gi v8f16:$Rn)), + (!cast<Instruction>(INST # v8f16) v8f16:$Rn)>; } def : Pat<(v2i32 (to_int_sat v2f32:$Rn, i32)), (!cast<Instruction>(INST # v2f32) v2f32:$Rn)>; @@ -5321,9 +5356,16 @@ multiclass SIMDTwoVectorFPToIntSatPats<SDNode to_int_sat, string INST> { (!cast<Instruction>(INST # v4f32) v4f32:$Rn)>; def : Pat<(v2i64 (to_int_sat v2f64:$Rn, i64)), (!cast<Instruction>(INST # v2f64) v2f64:$Rn)>; + + def : Pat<(v2i32 (to_int_sat_gi v2f32:$Rn)), + (!cast<Instruction>(INST # v2f32) v2f32:$Rn)>; + def : Pat<(v4i32 (to_int_sat_gi v4f32:$Rn)), + (!cast<Instruction>(INST # v4f32) v4f32:$Rn)>; + def : Pat<(v2i64 (to_int_sat_gi v2f64:$Rn)), + (!cast<Instruction>(INST # v2f64) v2f64:$Rn)>; } -defm : SIMDTwoVectorFPToIntSatPats<fp_to_sint_sat, "FCVTZS">; -defm : SIMDTwoVectorFPToIntSatPats<fp_to_uint_sat, "FCVTZU">; +defm : SIMDTwoVectorFPToIntSatPats<fp_to_sint_sat, fp_to_sint_sat_gi, "FCVTZS">; +defm : SIMDTwoVectorFPToIntSatPats<fp_to_uint_sat, fp_to_uint_sat_gi, "FCVTZU">; def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>; def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>; diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp index 3957d21..77a0dab 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp @@ -730,6 +730,55 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) .libcallFor( {{s32, s128}, {s64, s128}, {s128, s128}, {s128, s32}, {s128, s64}}); + getActionDefinitionsBuilder({G_FPTOSI_SAT, G_FPTOUI_SAT}) + .legalFor({{s32, s32}, + {s64, s32}, + {s32, s64}, + {s64, s64}, + {v2s64, v2s64}, + {v4s32, v4s32}, + {v2s32, v2s32}}) + .legalIf([=](const LegalityQuery &Query) { + return HasFP16 && + (Query.Types[1] == s16 || Query.Types[1] == v4s16 || + Query.Types[1] == v8s16) && + (Query.Types[0] == s32 || Query.Types[0] == s64 || + Query.Types[0] == v4s16 || Query.Types[0] == v8s16); + }) + // Handle types larger than i64 by scalarizing/lowering. + .scalarizeIf(scalarOrEltWiderThan(0, 64), 0) + .scalarizeIf(scalarOrEltWiderThan(1, 64), 1) + // The range of a fp16 value fits into an i17, so we can lower the width + // to i64. + .narrowScalarIf( + [=](const LegalityQuery &Query) { + return Query.Types[1] == s16 && Query.Types[0].getSizeInBits() > 64; + }, + changeTo(0, s64)) + .lowerIf(::any(scalarWiderThan(0, 64), scalarWiderThan(1, 64)), 0) + .moreElementsToNextPow2(0) + .widenScalarToNextPow2(0, /*MinSize=*/32) + .minScalar(0, s32) + .widenScalarOrEltToNextPow2OrMinSize(1, /*MinSize=*/HasFP16 ? 
16 : 32) + .widenScalarIf( + [=](const LegalityQuery &Query) { + unsigned ITySize = Query.Types[0].getScalarSizeInBits(); + return (ITySize == 16 || ITySize == 32 || ITySize == 64) && + ITySize > Query.Types[1].getScalarSizeInBits(); + }, + LegalizeMutations::changeElementSizeTo(1, 0)) + .widenScalarIf( + [=](const LegalityQuery &Query) { + unsigned FTySize = Query.Types[1].getScalarSizeInBits(); + return (FTySize == 16 || FTySize == 32 || FTySize == 64) && + Query.Types[0].getScalarSizeInBits() < FTySize; + }, + LegalizeMutations::changeElementSizeTo(0, 1)) + .widenScalarOrEltToNextPow2(0) + .clampNumElements(0, v4s16, v8s16) + .clampNumElements(0, v2s32, v4s32) + .clampMaxNumElements(0, s64, 2); + getActionDefinitionsBuilder({G_SITOFP, G_UITOFP}) .legalFor({{s32, s32}, {s64, s32}, diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp index 23e1350..8d63c36 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp @@ -535,6 +535,8 @@ bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr &MI, switch (MI.getOpcode()) { case TargetOpcode::G_FPTOSI: case TargetOpcode::G_FPTOUI: + case TargetOpcode::G_FPTOSI_SAT: + case TargetOpcode::G_FPTOUI_SAT: case TargetOpcode::G_FCMP: case TargetOpcode::G_LROUND: case TargetOpcode::G_LLROUND: @@ -799,6 +801,8 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { } case TargetOpcode::G_FPTOSI: case TargetOpcode::G_FPTOUI: + case TargetOpcode::G_FPTOSI_SAT: + case TargetOpcode::G_FPTOUI_SAT: case TargetOpcode::G_INTRINSIC_LRINT: case TargetOpcode::G_INTRINSIC_LLRINT: if (MRI.getType(MI.getOperand(0).getReg()).isVector()) diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h index 7b74eab..a4ae8a1 100644 --- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h +++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h @@ -1587,6 +1587,12 @@ public: // the nop. return true; } + + bool requiresDisjointEarlyClobberAndUndef() const override { + // AMDGPU doesn't care if early-clobber and undef operands are allocated + // to the same register. + return false; + } }; class GCNUserSGPRUsageInfo { diff --git a/llvm/lib/Target/AMDGPU/R600Subtarget.h b/llvm/lib/Target/AMDGPU/R600Subtarget.h index c3d002f..7f0f930 100644 --- a/llvm/lib/Target/AMDGPU/R600Subtarget.h +++ b/llvm/lib/Target/AMDGPU/R600Subtarget.h @@ -160,6 +160,12 @@ public: unsigned getMinWavesPerEU() const override { return AMDGPU::IsaInfo::getMinWavesPerEU(this); } + + bool requiresDisjointEarlyClobberAndUndef() const override { + // AMDGPU doesn't care if early-clobber and undef operands are allocated + // to the same register. + return false; + } }; } // end namespace llvm diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp index 8ae7f29..b855b6b 100644 --- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp +++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp @@ -901,7 +901,16 @@ void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII, } } else /* LGKM_CNT || EXP_CNT || VS_CNT || NUM_INST_CNTS */ { // Match the score to the destination registers. - for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) { + // + // Check only explicit operands. Stores, especially spill stores, include + // implicit uses and defs of their super registers, which would create an + // artificial dependency even though they exist only for register liveness + // accounting purposes.
+ // + // There are special cases where implicit register defs and uses exist, such + // as M0, FLAT_SCR or VCC, but the wait will be generated earlier, in + // generateWaitcntInstBefore(), if those registers were loaded from memory. + for (unsigned I = 0, E = Inst.getNumExplicitOperands(); I != E; ++I) { auto &Op = Inst.getOperand(I); if (!Op.isReg() || !Op.isDef()) continue; diff --git a/llvm/lib/Target/AMDGPU/SILowerWWMCopies.cpp b/llvm/lib/Target/AMDGPU/SILowerWWMCopies.cpp index c6779659..7bff58c 100644 --- a/llvm/lib/Target/AMDGPU/SILowerWWMCopies.cpp +++ b/llvm/lib/Target/AMDGPU/SILowerWWMCopies.cpp @@ -90,9 +90,8 @@ void SILowerWWMCopies::addToWWMSpills(MachineFunction &MF, Register Reg) { if (Reg.isPhysical()) return; - Register PhysReg = VRM->getPhys(Reg); - assert(PhysReg != VirtRegMap::NO_PHYS_REG && - "should have allocated a physical register"); + MCRegister PhysReg = VRM->getPhys(Reg); + assert(PhysReg && "should have allocated a physical register"); MFI->allocateWWMSpill(MF, PhysReg); } diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp index 7101829..1105a96 100644 --- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp @@ -1219,7 +1219,7 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) { assert(DstReg == ARM::SP && "Only stack pointer as a destination reg is supported"); - SmallVector<unsigned, 4> RegList; + SmallVector<MCRegister, 4> RegList; // Skip src & dst reg, and pred ops. unsigned StartOp = 2 + 2; // Use all the operands. diff --git a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h index 58b5e98..926d702 100644 --- a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h +++ b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h @@ -240,20 +240,6 @@ public: unsigned SrcSubReg) const override; int getSEHRegNum(unsigned i) const { return getEncodingValue(i); } - - bool doesRegClassHavePseudoInitUndef( - const TargetRegisterClass *RC) const override { - (void)RC; - // For the ARM Architecture we want to always return true because all - // required PseudoInitUndef types have been added. If compilation fails due - // to `Unexpected register class`, this is likely to be because the specific - // register being used is not support by Init Undef and needs the Pseudo - // Instruction adding to ARMInstrInfo.td. If this is implemented as a - // conditional check, this could create a false positive where Init Undef is - // not running, skipping the instruction and moving to the next. This could - // lead to illegal instructions being generated by the register allocator. - return true; - } }; } // end namespace llvm diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 67653a1..db564d7 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -8017,7 +8017,6 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) isConstant = false; - ValueCounts.insert(std::make_pair(V, 0)); unsigned &Count = ValueCounts[V]; // Is this value dominant?
(takes up more than half of the lanes) diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h index 00239ff..fa20f4b 100644 --- a/llvm/lib/Target/ARM/ARMSubtarget.h +++ b/llvm/lib/Target/ARM/ARMSubtarget.h @@ -209,13 +209,6 @@ public: return &InstrInfo->getRegisterInfo(); } - /// The correct instructions have been implemented to initialize undef - /// registers, therefore the ARM Architecture is supported by the Init Undef - /// Pass. This will return true as the pass needs to be supported for all - /// types of instructions. The pass will then perform more checks to ensure it - /// should be applying the Pseudo Instructions. - bool supportsInitUndef() const override { return true; } - const CallLowering *getCallLowering() const override; InstructionSelector *getInstructionSelector() const override; const LegalizerInfo *getLegalizerInfo() const override; diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index 10fef90..3e3f134 100644 --- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -759,7 +759,7 @@ public: bool hasMVE() const { return getSTI().hasFeature(ARM::HasMVEIntegerOps); } // Return the low-subreg of a given Q register. - unsigned getDRegFromQReg(unsigned QReg) const { + MCRegister getDRegFromQReg(MCRegister QReg) const { return MRI->getSubReg(QReg, ARM::dsub_0); } @@ -808,7 +808,7 @@ class ARMOperand : public MCParsedAsmOperand { } Kind; SMLoc StartLoc, EndLoc, AlignmentLoc; - SmallVector<unsigned, 8> Registers; + SmallVector<MCRegister, 8> Registers; ARMAsmParser *Parser; @@ -862,12 +862,12 @@ class ARMOperand : public MCParsedAsmOperand { }; struct RegOp { - unsigned RegNum; + MCRegister RegNum; }; // A vector register list is a sequential list of 1 to 4 registers. struct VectorListOp { - unsigned RegNum; + MCRegister RegNum; unsigned Count; unsigned LaneIndex; bool isDoubleSpaced; @@ -883,11 +883,11 @@ class ARMOperand : public MCParsedAsmOperand { /// Combined record for all forms of ARM address expressions. struct MemoryOp { - unsigned BaseRegNum; + MCRegister BaseRegNum; // Offset is in OffsetReg or OffsetImm. If both are zero, no offset // was specified. const MCExpr *OffsetImm; // Offset immediate value - unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL + MCRegister OffsetRegNum; // Offset register num, when OffsetImm == NULL ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg unsigned ShiftImm; // shift for OffsetReg. 
unsigned Alignment; // 0 = no alignment specified @@ -896,7 +896,7 @@ class ARMOperand : public MCParsedAsmOperand { }; struct PostIdxRegOp { - unsigned RegNum; + MCRegister RegNum; bool isAdd; ARM_AM::ShiftOpc ShiftTy; unsigned ShiftImm; @@ -909,14 +909,14 @@ class ARMOperand : public MCParsedAsmOperand { struct RegShiftedRegOp { ARM_AM::ShiftOpc ShiftTy; - unsigned SrcReg; - unsigned ShiftReg; + MCRegister SrcReg; + MCRegister ShiftReg; unsigned ShiftImm; }; struct RegShiftedImmOp { ARM_AM::ShiftOpc ShiftTy; - unsigned SrcReg; + MCRegister SrcReg; unsigned ShiftImm; }; @@ -1005,7 +1005,7 @@ public: return Reg.RegNum; } - const SmallVectorImpl<unsigned> &getRegList() const { + const SmallVectorImpl<MCRegister> &getRegList() const { assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || @@ -2547,9 +2547,9 @@ public: void addVPTPredROperands(MCInst &Inst, unsigned N) const { assert(N == 4 && "Invalid number of operands!"); addVPTPredNOperands(Inst, N-1); - unsigned RegNum; + MCRegister RegNum; if (getVPTPred() == ARMVCC::None) { - RegNum = 0; + RegNum = MCRegister(); } else { unsigned NextOpIndex = Inst.getNumOperands(); auto &MCID = Parser->getInstrDesc(Inst.getOpcode()); @@ -2630,15 +2630,15 @@ public: void addRegListOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); - const SmallVectorImpl<unsigned> &RegList = getRegList(); - for (unsigned Reg : RegList) + const SmallVectorImpl<MCRegister> &RegList = getRegList(); + for (MCRegister Reg : RegList) Inst.addOperand(MCOperand::createReg(Reg)); } void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); - const SmallVectorImpl<unsigned> &RegList = getRegList(); - for (unsigned Reg : RegList) + const SmallVectorImpl<MCRegister> &RegList = getRegList(); + for (MCRegister Reg : RegList) Inst.addOperand(MCOperand::createReg(Reg)); } @@ -3393,7 +3393,7 @@ public: else if (isDReg() && !Parser->hasMVE()) { Inst.addOperand(MCOperand::createReg(Reg.RegNum)); } else if (isQReg() && !Parser->hasMVE()) { - auto DPair = Parser->getDRegFromQReg(Reg.RegNum); + MCRegister DPair = Parser->getDRegFromQReg(Reg.RegNum); DPair = Parser->getMRI()->getMatchingSuperReg( DPair, ARM::dsub_0, &ARMMCRegisterClasses[ARM::DPairRegClassID]); Inst.addOperand(MCOperand::createReg(DPair)); @@ -3684,10 +3684,10 @@ public: return Op; } - static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S, + static std::unique_ptr<ARMOperand> CreateCCOut(MCRegister Reg, SMLoc S, ARMAsmParser &Parser) { auto Op = std::make_unique<ARMOperand>(k_CCOut, Parser); - Op->Reg.RegNum = RegNum; + Op->Reg.RegNum = Reg; Op->StartLoc = S; Op->EndLoc = S; return Op; @@ -3703,19 +3703,19 @@ public: return Op; } - static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S, - SMLoc E, ARMAsmParser &Parser) { + static std::unique_ptr<ARMOperand> CreateReg(MCRegister Reg, SMLoc S, SMLoc E, + ARMAsmParser &Parser) { auto Op = std::make_unique<ARMOperand>(k_Register, Parser); - Op->Reg.RegNum = RegNum; + Op->Reg.RegNum = Reg; Op->StartLoc = S; Op->EndLoc = E; return Op; } static std::unique_ptr<ARMOperand> - CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg, - unsigned ShiftReg, unsigned ShiftImm, SMLoc S, SMLoc E, - ARMAsmParser &Parser) { + CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, MCRegister SrcReg, + MCRegister ShiftReg, unsigned ShiftImm, SMLoc S, + SMLoc E, 
ARMAsmParser &Parser) { auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister, Parser); Op->RegShiftedReg.ShiftTy = ShTy; Op->RegShiftedReg.SrcReg = SrcReg; @@ -3727,7 +3727,7 @@ public: } static std::unique_ptr<ARMOperand> - CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg, + CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, MCRegister SrcReg, unsigned ShiftImm, SMLoc S, SMLoc E, ARMAsmParser &Parser) { auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate, Parser); @@ -3793,7 +3793,7 @@ public: } static std::unique_ptr<ARMOperand> - CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs, + CreateRegList(SmallVectorImpl<std::pair<unsigned, MCRegister>> &Regs, SMLoc StartLoc, SMLoc EndLoc, ARMAsmParser &Parser) { assert(Regs.size() > 0 && "RegList contains no registers?"); KindTy Kind = k_RegisterList; @@ -3827,10 +3827,10 @@ public: } static std::unique_ptr<ARMOperand> - CreateVectorList(unsigned RegNum, unsigned Count, bool isDoubleSpaced, - SMLoc S, SMLoc E, ARMAsmParser &Parser) { + CreateVectorList(MCRegister Reg, unsigned Count, bool isDoubleSpaced, SMLoc S, + SMLoc E, ARMAsmParser &Parser) { auto Op = std::make_unique<ARMOperand>(k_VectorList, Parser); - Op->VectorList.RegNum = RegNum; + Op->VectorList.RegNum = Reg; Op->VectorList.Count = Count; Op->VectorList.isDoubleSpaced = isDoubleSpaced; Op->StartLoc = S; @@ -3839,10 +3839,10 @@ public: } static std::unique_ptr<ARMOperand> - CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced, + CreateVectorListAllLanes(MCRegister Reg, unsigned Count, bool isDoubleSpaced, SMLoc S, SMLoc E, ARMAsmParser &Parser) { auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes, Parser); - Op->VectorList.RegNum = RegNum; + Op->VectorList.RegNum = Reg; Op->VectorList.Count = Count; Op->VectorList.isDoubleSpaced = isDoubleSpaced; Op->StartLoc = S; @@ -3884,14 +3884,14 @@ public: } static std::unique_ptr<ARMOperand> - CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum, + CreateMem(MCRegister BaseReg, const MCExpr *OffsetImm, MCRegister OffsetReg, ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S, SMLoc E, ARMAsmParser &Parser, SMLoc AlignmentLoc = SMLoc()) { auto Op = std::make_unique<ARMOperand>(k_Memory, Parser); - Op->Memory.BaseRegNum = BaseRegNum; + Op->Memory.BaseRegNum = BaseReg; Op->Memory.OffsetImm = OffsetImm; - Op->Memory.OffsetRegNum = OffsetRegNum; + Op->Memory.OffsetRegNum = OffsetReg; Op->Memory.ShiftType = ShiftType; Op->Memory.ShiftImm = ShiftImm; Op->Memory.Alignment = Alignment; @@ -3903,10 +3903,10 @@ public: } static std::unique_ptr<ARMOperand> - CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy, + CreatePostIdxReg(MCRegister Reg, bool isAdd, ARM_AM::ShiftOpc ShiftTy, unsigned ShiftImm, SMLoc S, SMLoc E, ARMAsmParser &Parser) { auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister, Parser); - Op->PostIdxReg.RegNum = RegNum; + Op->PostIdxReg.RegNum = Reg; Op->PostIdxReg.isAdd = isAdd; Op->PostIdxReg.ShiftTy = ShiftTy; Op->PostIdxReg.ShiftImm = ShiftImm; @@ -4103,9 +4103,8 @@ void ARMOperand::print(raw_ostream &OS) const { case k_FPDRegisterListWithVPR: { OS << "<register_list "; - const SmallVectorImpl<unsigned> &RegList = getRegList(); - for (SmallVectorImpl<unsigned>::const_iterator - I = RegList.begin(), E = RegList.end(); I != E; ) { + const SmallVectorImpl<MCRegister> &RegList = getRegList(); + for (auto I = RegList.begin(), E = RegList.end(); I != E;) { OS << RegName(*I); if (++I < 
E) OS << ", "; } @@ -4311,11 +4310,11 @@ int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) { (ARMOperand *)Operands.pop_back_val().release()); if (!PrevOp->isReg()) return Error(PrevOp->getStartLoc(), "shift must be of a register"); - int SrcReg = PrevOp->getReg(); + MCRegister SrcReg = PrevOp->getReg(); SMLoc EndLoc; int64_t Imm = 0; - int ShiftReg = 0; + MCRegister ShiftReg; if (ShiftTy == ARM_AM::rrx) { // RRX Doesn't have an explicit shift amount. The encoder expects // the shift register to be the same as the source register. Seems odd, @@ -4591,8 +4590,8 @@ static unsigned getNextRegister(unsigned Reg) { // Insert an <Encoding, Register> pair in an ordered vector. Return true on // success, or false, if duplicate encoding found. static bool -insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs, - unsigned Enc, unsigned Reg) { +insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, MCRegister>> &Regs, + unsigned Enc, MCRegister Reg) { Regs.emplace_back(Enc, Reg); for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) { if (J->first == Enc) { @@ -4626,7 +4625,7 @@ bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder, // The reglist instructions have at most 16 registers, so reserve // space for that many. int EReg = 0; - SmallVector<std::pair<unsigned, unsigned>, 16> Registers; + SmallVector<std::pair<unsigned, MCRegister>, 16> Registers; // Allow Q regs and just interpret them as the two D sub-registers. if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { @@ -7409,9 +7408,9 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID); // Adjust only if Op1 is a GPR. if (Op1.isReg() && MRC.contains(Op1.getReg())) { - unsigned Reg1 = Op1.getReg(); + MCRegister Reg1 = Op1.getReg(); unsigned Rt = MRI->getEncodingValue(Reg1); - unsigned Reg2 = Op2.getReg(); + MCRegister Reg2 = Op2.getReg(); unsigned Rt2 = MRI->getEncodingValue(Reg2); // Rt2 must be Rt + 1. if (Rt + 1 != Rt2) @@ -7426,7 +7425,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, IsLoad ? "destination operands must start start at an even register" : "source operands must start start at an even register"); - unsigned NewReg = MRI->getMatchingSuperReg( + MCRegister NewReg = MRI->getMatchingSuperReg( Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID))); Operands[Idx] = ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc(), *this); @@ -7464,7 +7463,7 @@ static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo, bool &containsReg) { containsReg = false; for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { - unsigned OpReg = Inst.getOperand(i).getReg(); + MCRegister OpReg = Inst.getOperand(i).getReg(); if (OpReg == Reg) containsReg = true; // Anything other than a low register isn't legal here. @@ -7776,7 +7775,7 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst, return true; break; case ARM::t2BXJ: { - const unsigned RmReg = Inst.getOperand(0).getReg(); + const MCRegister RmReg = Inst.getOperand(0).getReg(); // Rm = SP is no longer unpredictable in v8-A if (RmReg == ARM::SP && !hasV8Ops()) return Error(Operands[MnemonicOpsEndInd]->getStartLoc(), @@ -8054,7 +8053,7 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst, // // Thumb LDM instructions are writeback iff the base register is not // in the register list. 
- unsigned Rn = Inst.getOperand(0).getReg(); + MCRegister Rn = Inst.getOperand(0).getReg(); bool HasWritebackToken = (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]) .isToken() && @@ -8508,8 +8507,8 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst, case ARM::t2SMLSLD: case ARM::t2SMLSLDX: case ARM::t2SMULL: { - unsigned RdHi = Inst.getOperand(0).getReg(); - unsigned RdLo = Inst.getOperand(1).getReg(); + MCRegister RdHi = Inst.getOperand(0).getReg(); + MCRegister RdLo = Inst.getOperand(1).getReg(); if(RdHi == RdLo) { return Error(Loc, "unpredictable instruction, RdHi and RdLo must be different"); @@ -12520,7 +12519,7 @@ bool ARMAsmParser::parseDirectiveSEHSaveRegs(SMLoc L, bool Wide) { ARMOperand &Op = (ARMOperand &)*Operands[0]; if (!Op.isRegList()) return Error(L, ".seh_save_regs{_w} expects GPR registers"); - const SmallVectorImpl<unsigned> &RegList = Op.getRegList(); + const SmallVectorImpl<MCRegister> &RegList = Op.getRegList(); uint32_t Mask = 0; for (size_t i = 0; i < RegList.size(); ++i) { unsigned Reg = MRI->getEncodingValue(RegList[i]); @@ -12562,7 +12561,7 @@ bool ARMAsmParser::parseDirectiveSEHSaveFRegs(SMLoc L) { ARMOperand &Op = (ARMOperand &)*Operands[0]; if (!Op.isDPRRegList()) return Error(L, ".seh_save_fregs expects DPR registers"); - const SmallVectorImpl<unsigned> &RegList = Op.getRegList(); + const SmallVectorImpl<MCRegister> &RegList = Op.getRegList(); uint32_t Mask = 0; for (size_t i = 0; i < RegList.size(); ++i) { unsigned Reg = MRI->getEncodingValue(RegList[i]); diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp index c8ecb4a..9f6bc31 100644 --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp @@ -1188,7 +1188,7 @@ uint64_t ARMAsmBackendDarwin::generateCompactUnwindEncoding( MCRegister CFARegister = ARM::SP; int CFARegisterOffset = 0; // Mark savable registers as initially unsaved - DenseMap<unsigned, int> RegOffsets; + DenseMap<MCRegister, int> RegOffsets; int FloatRegCount = 0; // Process each .cfi directive and build up compact unwind info. for (const MCCFIInstruction &Inst : Instrs) { @@ -1246,12 +1246,12 @@ uint64_t ARMAsmBackendDarwin::generateCompactUnwindEncoding( } int StackAdjust = CFARegisterOffset - 8; if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) { - DEBUG_WITH_TYPE("compact-unwind", - llvm::dbgs() - << "LR not saved as standard frame, StackAdjust=" - << StackAdjust - << ", CFARegisterOffset=" << CFARegisterOffset - << ", lr save at offset=" << RegOffsets[14] << "\n"); + DEBUG_WITH_TYPE( + "compact-unwind", + llvm::dbgs() << "LR not saved as standard frame, StackAdjust=" + << StackAdjust + << ", CFARegisterOffset=" << CFARegisterOffset + << ", lr save at offset=" << RegOffsets[ARM::LR] << "\n"); return CU::UNWIND_ARM_MODE_DWARF; } if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) { @@ -1332,7 +1332,7 @@ uint64_t ARMAsmBackendDarwin::generateCompactUnwindEncoding( // Floating point registers must either be saved sequentially, or we defer to // DWARF. No gaps allowed here so check that each saved d-register is // precisely where it should be. 
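Before the FPRCSRegs loop below, a hedged sketch of the "no gaps" check it performs; the expected-offset arithmetic is simplified relative to the real Darwin encoder, and the helper name is invented for illustration:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/MC/MCRegister.h"

// Every saved D-register must sit exactly in its expected 8-byte slot;
// a missing register or a misplaced save forces the DWARF fallback.
static bool fpSavesAreSequential(
    const llvm::DenseMap<llvm::MCRegister, int> &RegOffsets,
    llvm::ArrayRef<llvm::MCRegister> SavedRegs, int FirstOffset) {
  int Expected = FirstOffset;
  for (llvm::MCRegister R : SavedRegs) {
    auto It = RegOffsets.find(R);
    if (It == RegOffsets.end() || It->second != Expected)
      return false; // gap or wrong slot: defer to DWARF
    Expected -= 8;  // D-registers occupy 8 bytes each
  }
  return true;
}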
- static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 }; + static MCPhysReg FPRCSRegs[] = {ARM::D8, ARM::D10, ARM::D12, ARM::D14}; for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) { auto Offset = RegOffsets.find(FPRCSRegs[Idx]); if (Offset == RegOffsets.end()) { diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h b/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h index 35c2d6c..e56cb02 100644 --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h @@ -157,9 +157,9 @@ namespace ARM_ISB { /// isARMLowRegister - Returns true if the register is a low register (r0-r7). /// -static inline bool isARMLowRegister(unsigned Reg) { +static inline bool isARMLowRegister(MCRegister Reg) { using namespace ARM; - switch (Reg) { + switch (Reg.id()) { case R0: case R1: case R2: case R3: case R4: case R5: case R6: case R7: return true; diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp index c9631bd..ff2b557 100644 --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp @@ -83,7 +83,7 @@ class ARMTargetAsmStreamer : public ARMTargetStreamer { void emitSetFP(unsigned FpReg, unsigned SpReg, int64_t Offset = 0) override; void emitMovSP(unsigned Reg, int64_t Offset = 0) override; void emitPad(int64_t Offset) override; - void emitRegSave(const SmallVectorImpl<unsigned> &RegList, + void emitRegSave(const SmallVectorImpl<MCRegister> &RegList, bool isVector) override; void emitUnwindRaw(int64_t Offset, const SmallVectorImpl<uint8_t> &Opcodes) override; @@ -165,8 +165,8 @@ void ARMTargetAsmStreamer::emitPad(int64_t Offset) { OS << "\t.pad\t#" << Offset << '\n'; } -void ARMTargetAsmStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList, - bool isVector) { +void ARMTargetAsmStreamer::emitRegSave( + const SmallVectorImpl<MCRegister> &RegList, bool isVector) { assert(RegList.size() && "RegList should not be empty"); if (isVector) OS << "\t.vsave\t{"; @@ -404,7 +404,7 @@ private: void emitSetFP(unsigned FpReg, unsigned SpReg, int64_t Offset = 0) override; void emitMovSP(unsigned Reg, int64_t Offset = 0) override; void emitPad(int64_t Offset) override; - void emitRegSave(const SmallVectorImpl<unsigned> &RegList, + void emitRegSave(const SmallVectorImpl<MCRegister> &RegList, bool isVector) override; void emitUnwindRaw(int64_t Offset, const SmallVectorImpl<uint8_t> &Opcodes) override; @@ -472,7 +472,7 @@ public: void emitSetFP(unsigned NewFpReg, unsigned NewSpReg, int64_t Offset = 0); void emitMovSP(unsigned Reg, int64_t Offset = 0); void emitPad(int64_t Offset); - void emitRegSave(const SmallVectorImpl<unsigned> &RegList, bool isVector); + void emitRegSave(const SmallVectorImpl<MCRegister> &RegList, bool isVector); void emitUnwindRaw(int64_t Offset, const SmallVectorImpl<uint8_t> &Opcodes); void emitFill(const MCExpr &NumBytes, uint64_t FillValue, SMLoc Loc) override { @@ -766,8 +766,8 @@ void ARMTargetELFStreamer::emitPad(int64_t Offset) { getStreamer().emitPad(Offset); } -void ARMTargetELFStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList, - bool isVector) { +void ARMTargetELFStreamer::emitRegSave( + const SmallVectorImpl<MCRegister> &RegList, bool isVector) { getStreamer().emitRegSave(RegList, isVector); } @@ -1412,17 +1412,17 @@ void ARMELFStreamer::emitPad(int64_t Offset) { static std::pair<unsigned, unsigned> collectHWRegs(const MCRegisterInfo &MRI, unsigned Idx, - const SmallVectorImpl<unsigned> 
&RegList, bool IsVector, + const SmallVectorImpl<MCRegister> &RegList, bool IsVector, uint32_t &Mask_) { uint32_t Mask = 0; unsigned Count = 0; while (Idx > 0) { - unsigned Reg = RegList[Idx - 1]; + MCRegister Reg = RegList[Idx - 1]; if (Reg == ARM::RA_AUTH_CODE) break; - Reg = MRI.getEncodingValue(Reg); - assert(Reg < (IsVector ? 32U : 16U) && "Register out of range"); - unsigned Bit = (1u << Reg); + unsigned RegEnc = MRI.getEncodingValue(Reg); + assert(RegEnc < (IsVector ? 32U : 16U) && "Register out of range"); + unsigned Bit = (1u << RegEnc); if ((Mask & Bit) == 0) { Mask |= Bit; ++Count; @@ -1434,7 +1434,7 @@ collectHWRegs(const MCRegisterInfo &MRI, unsigned Idx, return {Idx, Count}; } -void ARMELFStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList, +void ARMELFStreamer::emitRegSave(const SmallVectorImpl<MCRegister> &RegList, bool IsVector) { uint32_t Mask; unsigned Idx, Count; diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp index 24e627c..8a7339f 100644 --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp @@ -260,7 +260,7 @@ void ARMInstPrinter::printInst(const MCInst *MI, uint64_t Address, case ARM::tLDMIA: { bool Writeback = true; - unsigned BaseReg = MI->getOperand(0).getReg(); + MCRegister BaseReg = MI->getOperand(0).getReg(); for (unsigned i = 3; i < MI->getNumOperands(); ++i) { if (MI->getOperand(i).getReg() == BaseReg) Writeback = false; @@ -291,7 +291,7 @@ void ARMInstPrinter::printInst(const MCInst *MI, uint64_t Address, case ARM::STLEXD: { const MCRegisterClass &MRC = MRI.getRegClass(ARM::GPRRegClassID); bool isStore = Opcode == ARM::STREXD || Opcode == ARM::STLEXD; - unsigned Reg = MI->getOperand(isStore ? 1 : 0).getReg(); + MCRegister Reg = MI->getOperand(isStore ? 1 : 0).getReg(); if (MRC.contains(Reg)) { MCInst NewMI; MCOperand NewReg; @@ -342,7 +342,7 @@ void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O) { const MCOperand &Op = MI->getOperand(OpNo); if (Op.isReg()) { - unsigned Reg = Op.getReg(); + MCRegister Reg = Op.getReg(); printRegName(O, Reg); } else if (Op.isImm()) { markup(O, Markup::Immediate) << '#' << formatImm(Op.getImm()); @@ -871,7 +871,7 @@ void ARMInstPrinter::printRegisterList(const MCInst *MI, unsigned OpNum, void ARMInstPrinter::printGPRPairOperand(const MCInst *MI, unsigned OpNum, const MCSubtargetInfo &STI, raw_ostream &O) { - unsigned Reg = MI->getOperand(OpNum).getReg(); + MCRegister Reg = MI->getOperand(OpNum).getReg(); printRegName(O, MRI.getSubReg(Reg, ARM::gsub_0)); O << ", "; printRegName(O, MRI.getSubReg(Reg, ARM::gsub_1)); @@ -1141,7 +1141,7 @@ void ARMInstPrinter::printThumbAddrModeRROperand(const MCInst *MI, unsigned Op, WithMarkup ScopedMarkup = markup(O, Markup::Memory); O << "["; printRegName(O, MO1.getReg()); - if (unsigned RegNum = MO2.getReg()) { + if (MCRegister RegNum = MO2.getReg()) { O << ", "; printRegName(O, RegNum); } @@ -1208,7 +1208,7 @@ void ARMInstPrinter::printT2SOOperand(const MCInst *MI, unsigned OpNum, const MCOperand &MO1 = MI->getOperand(OpNum); const MCOperand &MO2 = MI->getOperand(OpNum + 1); - unsigned Reg = MO1.getReg(); + MCRegister Reg = MO1.getReg(); printRegName(O, Reg); // Print the shift opc. 
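The streamer and printer hunks in this stretch are one mechanical migration from unsigned register numbers to MCRegister. A minimal sketch of the wrapper's contract as the patch relies on it (the helper itself is illustrative):

#include "llvm/MC/MCRegister.h"

static bool hasRegister(llvm::MCRegister Reg) {
  // A default-constructed MCRegister is invalid, replacing the old
  // `Reg != 0` idiom; Reg.id() recovers the raw number where a plain
  // unsigned is still needed, e.g. in switches over target enumerators.
  return Reg.isValid();
}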
@@ -1490,9 +1490,9 @@ void ARMInstPrinter::printVectorListOne(const MCInst *MI, unsigned OpNum, void ARMInstPrinter::printVectorListTwo(const MCInst *MI, unsigned OpNum, const MCSubtargetInfo &STI, raw_ostream &O) { - unsigned Reg = MI->getOperand(OpNum).getReg(); - unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0); - unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_1); + MCRegister Reg = MI->getOperand(OpNum).getReg(); + MCRegister Reg0 = MRI.getSubReg(Reg, ARM::dsub_0); + MCRegister Reg1 = MRI.getSubReg(Reg, ARM::dsub_1); O << "{"; printRegName(O, Reg0); O << ", "; @@ -1503,9 +1503,9 @@ void ARMInstPrinter::printVectorListTwo(const MCInst *MI, unsigned OpNum, void ARMInstPrinter::printVectorListTwoSpaced(const MCInst *MI, unsigned OpNum, const MCSubtargetInfo &STI, raw_ostream &O) { - unsigned Reg = MI->getOperand(OpNum).getReg(); - unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0); - unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_2); + MCRegister Reg = MI->getOperand(OpNum).getReg(); + MCRegister Reg0 = MRI.getSubReg(Reg, ARM::dsub_0); + MCRegister Reg1 = MRI.getSubReg(Reg, ARM::dsub_2); O << "{"; printRegName(O, Reg0); O << ", "; @@ -1558,9 +1558,9 @@ void ARMInstPrinter::printVectorListTwoAllLanes(const MCInst *MI, unsigned OpNum, const MCSubtargetInfo &STI, raw_ostream &O) { - unsigned Reg = MI->getOperand(OpNum).getReg(); - unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0); - unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_1); + MCRegister Reg = MI->getOperand(OpNum).getReg(); + MCRegister Reg0 = MRI.getSubReg(Reg, ARM::dsub_0); + MCRegister Reg1 = MRI.getSubReg(Reg, ARM::dsub_1); O << "{"; printRegName(O, Reg0); O << "[], "; @@ -1605,9 +1605,9 @@ void ARMInstPrinter::printVectorListFourAllLanes(const MCInst *MI, void ARMInstPrinter::printVectorListTwoSpacedAllLanes( const MCInst *MI, unsigned OpNum, const MCSubtargetInfo &STI, raw_ostream &O) { - unsigned Reg = MI->getOperand(OpNum).getReg(); - unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0); - unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_2); + MCRegister Reg = MI->getOperand(OpNum).getReg(); + MCRegister Reg0 = MRI.getSubReg(Reg, ARM::dsub_0); + MCRegister Reg1 = MRI.getSubReg(Reg, ARM::dsub_2); O << "{"; printRegName(O, Reg0); O << "[], "; @@ -1684,7 +1684,7 @@ template<unsigned NumRegs> void ARMInstPrinter::printMVEVectorList(const MCInst *MI, unsigned OpNum, const MCSubtargetInfo &STI, raw_ostream &O) { - unsigned Reg = MI->getOperand(OpNum).getReg(); + MCRegister Reg = MI->getOperand(OpNum).getReg(); const char *Prefix = "{"; for (unsigned i = 0; i < NumRegs; i++) { O << Prefix; diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp index 3f37acf..92427b4 100644 --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp @@ -543,7 +543,7 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const { if (MO.isReg()) { - unsigned Reg = MO.getReg(); + MCRegister Reg = MO.getReg(); unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg); // In NEON, Q registers are encoded as 2x their register number, @@ -555,7 +555,7 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO, if (STI.hasFeature(ARM::HasMVEIntegerOps)) return RegNo; - switch (Reg) { + switch (Reg.id()) { default: return RegNo; case ARM::Q0: case ARM::Q1: case ARM::Q2: case ARM::Q3: @@ -711,7 +711,7 @@ static bool HasConditionalBranch(const MCInst &MI) { const MCOperand &MCOp1 = 
MI.getOperand(i); const MCOperand &MCOp2 = MI.getOperand(i + 1); if (MCOp1.isImm() && MCOp2.isReg() && - (MCOp2.getReg() == 0 || MCOp2.getReg() == ARM::CPSR)) { + (!MCOp2.getReg() || MCOp2.getReg() == ARM::CPSR)) { if (ARMCC::CondCodes(MCOp1.getImm()) != ARMCC::AL) return true; } @@ -1311,7 +1311,7 @@ getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx, const MCOperand &MO1 = MI.getOperand(OpIdx+1); unsigned Imm = MO1.getImm(); bool isAdd = ARM_AM::getAM2Op(Imm) == ARM_AM::add; - bool isReg = MO.getReg() != 0; + bool isReg = MO.getReg().isValid(); uint32_t Binary = ARM_AM::getAM2Offset(Imm); // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm12 if (isReg) { @@ -1347,7 +1347,7 @@ getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx, const MCOperand &MO1 = MI.getOperand(OpIdx+1); unsigned Imm = MO1.getImm(); bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add; - bool isImm = MO.getReg() == 0; + bool isImm = !MO.getReg().isValid(); uint32_t Imm8 = ARM_AM::getAM3Offset(Imm); // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8 if (!isImm) @@ -1383,7 +1383,7 @@ getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx, unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); unsigned Imm = MO2.getImm(); bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add; - bool isImm = MO1.getReg() == 0; + bool isImm = !MO1.getReg().isValid(); uint32_t Imm8 = ARM_AM::getAM3Offset(Imm); // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8 if (!isImm) @@ -1537,7 +1537,7 @@ getSORegRegOpValue(const MCInst &MI, unsigned OpIdx, // Encode the shift opcode. unsigned SBits = 0; - unsigned Rs = MO1.getReg(); + MCRegister Rs = MO1.getReg(); if (Rs) { // Set shift operand (bit[7:4]). // LSL - 0001 @@ -1737,7 +1737,7 @@ getRegisterListOpValue(const MCInst &MI, unsigned Op, // // LDM/STM: // {15-0} = Bitfield of GPRs. 
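For the LDM/STM bitfield layout noted above ({15-0} = one bit per GPR), a hedged sketch of how such a mask could be assembled from a register-list operand; FirstListOp is an assumed parameter and the operand walk is simplified:

#include <cstdint>
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCRegisterInfo.h"

// Sets bit N for each listed GPR, where N is the register's hardware
// encoding as reported by MCRegisterInfo.
static uint32_t buildGPRBitfield(const llvm::MCInst &MI, unsigned FirstListOp,
                                 const llvm::MCRegisterInfo &MRI) {
  uint32_t Mask = 0;
  for (unsigned I = FirstListOp, E = MI.getNumOperands(); I != E; ++I)
    Mask |= 1u << MRI.getEncodingValue(MI.getOperand(I).getReg());
  return Mask;
}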
- unsigned Reg = MI.getOperand(Op).getReg(); + MCRegister Reg = MI.getOperand(Op).getReg(); bool SPRRegs = ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg); bool DPRRegs = ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg); @@ -1851,7 +1851,8 @@ getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const { const MCOperand &MO = MI.getOperand(Op); - if (MO.getReg() == 0) return 0x0D; + if (!MO.getReg()) + return 0x0D; return CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); } diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp index cf4fc37..01a2713 100644 --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp @@ -111,7 +111,7 @@ static bool getARMLoadDeprecationInfo(MCInst &MI, const MCSubtargetInfo &STI, bool ListContainsPC = false, ListContainsLR = false; for (unsigned OI = 4, OE = MI.getNumOperands(); OI < OE; ++OI) { assert(MI.getOperand(OI).isReg() && "expected register"); - switch (MI.getOperand(OI).getReg()) { + switch (MI.getOperand(OI).getReg().id()) { default: break; case ARM::LR: diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp index 1237e50..d550b70 100644 --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp @@ -96,7 +96,7 @@ void ARMTargetStreamer::emitSetFP(unsigned FpReg, unsigned SpReg, int64_t Offset) {} void ARMTargetStreamer::emitMovSP(unsigned Reg, int64_t Offset) {} void ARMTargetStreamer::emitPad(int64_t Offset) {} -void ARMTargetStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList, +void ARMTargetStreamer::emitRegSave(const SmallVectorImpl<MCRegister> &RegList, bool isVector) {} void ARMTargetStreamer::emitUnwindRaw(int64_t StackOffset, const SmallVectorImpl<uint8_t> &Opcodes) { diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp index d18ded2..1f27c93 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp @@ -265,8 +265,6 @@ void RISCVInstPrinter::printRegReg(const MCInst *MI, unsigned OpNo, const MCOperand &MO = MI->getOperand(OpNo); assert(MO.isReg() && "printRegReg can only print register operands"); - if (MO.getReg() == RISCV::NoRegister) - return; printRegName(O, MO.getReg()); O << "("; diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index ab49315..7d2a7b2 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -3679,7 +3679,6 @@ static SDValue lowerBuildVectorViaDominantValues(SDValue Op, SelectionDAG &DAG, if (V.isUndef()) continue; - ValueCounts.insert(std::make_pair(V, 0)); unsigned &Count = ValueCounts[V]; if (0 == Count) if (auto *CFP = dyn_cast<ConstantFPSDNode>(V)) diff --git a/llvm/lib/Target/RISCV/RISCVProcessors.td b/llvm/lib/Target/RISCV/RISCVProcessors.td index d4ec5ecc..c4e1a14 100644 --- a/llvm/lib/Target/RISCV/RISCVProcessors.td +++ b/llvm/lib/Target/RISCV/RISCVProcessors.td @@ -383,6 +383,25 @@ def SYNTACORE_SCR5_RV64 : RISCVProcessorModel<"syntacore-scr5-rv64", FeatureStdExtC], [TuneNoDefaultUnroll, FeaturePostRAScheduler]>; +def SYNTACORE_SCR7 : RISCVProcessorModel<"syntacore-scr7", + NoSchedModel, + [Feature64Bit, + 
FeatureStdExtI, + FeatureStdExtZicsr, + FeatureStdExtZifencei, + FeatureStdExtM, + FeatureStdExtA, + FeatureStdExtF, + FeatureStdExtD, + FeatureStdExtC, + FeatureStdExtV, + FeatureStdExtZba, + FeatureStdExtZbb, + FeatureStdExtZbc, + FeatureStdExtZbs, + FeatureStdExtZkn], + [TuneNoDefaultUnroll, FeaturePostRAScheduler]>; + def VENTANA_VEYRON_V1 : RISCVProcessorModel<"veyron-v1", NoSchedModel, [Feature64Bit, diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h index 98a712a..cb0bb77 100644 --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h @@ -130,11 +130,6 @@ struct RISCVRegisterInfo : public RISCVGenRegisterInfo { const MachineFunction &MF, const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const override; - bool doesRegClassHavePseudoInitUndef( - const TargetRegisterClass *RC) const override { - return isVRRegClass(RC); - } - static bool isVRRegClass(const TargetRegisterClass *RC) { return RISCVRI::isVRegClass(RC->TSFlags) && RISCVRI::getNF(RC->TSFlags) == 1; diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h index ea54ff1..bf9ed3f 100644 --- a/llvm/lib/Target/RISCV/RISCVSubtarget.h +++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h @@ -306,8 +306,6 @@ public: unsigned getTailDupAggressiveThreshold() const { return TuneInfo->TailDupAggressiveThreshold; } - - bool supportsInitUndef() const override { return hasVInstructions(); } }; } // End llvm namespace diff --git a/llvm/lib/Target/X86/AsmParser/X86Operand.h b/llvm/lib/Target/X86/AsmParser/X86Operand.h index 7866978..0002785 100644 --- a/llvm/lib/Target/X86/AsmParser/X86Operand.h +++ b/llvm/lib/Target/X86/AsmParser/X86Operand.h @@ -47,7 +47,7 @@ struct X86Operand final : public MCParsedAsmOperand { }; struct RegOp { - unsigned RegNo; + MCRegister RegNo; }; struct PrefOp { @@ -60,11 +60,11 @@ struct X86Operand final : public MCParsedAsmOperand { }; struct MemOp { - unsigned SegReg; + MCRegister SegReg; const MCExpr *Disp; - unsigned BaseReg; - unsigned DefaultBaseReg; - unsigned IndexReg; + MCRegister BaseReg; + MCRegister DefaultBaseReg; + MCRegister IndexReg; unsigned Scale; unsigned Size; unsigned ModeSize; @@ -186,19 +186,19 @@ struct X86Operand final : public MCParsedAsmOperand { assert(Kind == Memory && "Invalid access!"); return Mem.Disp; } - unsigned getMemSegReg() const { + MCRegister getMemSegReg() const { assert(Kind == Memory && "Invalid access!"); return Mem.SegReg; } - unsigned getMemBaseReg() const { + MCRegister getMemBaseReg() const { assert(Kind == Memory && "Invalid access!"); return Mem.BaseReg; } - unsigned getMemDefaultBaseReg() const { + MCRegister getMemDefaultBaseReg() const { assert(Kind == Memory && "Invalid access!"); return Mem.DefaultBaseReg; } - unsigned getMemIndexReg() const { + MCRegister getMemIndexReg() const { assert(Kind == Memory && "Invalid access!"); return Mem.IndexReg; } @@ -600,8 +600,8 @@ struct X86Operand final : public MCParsedAsmOperand { void addMaskPairOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); - unsigned Reg = getReg(); - switch (Reg) { + MCRegister Reg = getReg(); + switch (Reg.id()) { case X86::K0: case X86::K1: Reg = X86::K0_K1; @@ -673,11 +673,11 @@ struct X86Operand final : public MCParsedAsmOperand { } static std::unique_ptr<X86Operand> - CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc, + CreateReg(MCRegister Reg, SMLoc StartLoc, SMLoc EndLoc, bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(), 
StringRef SymName = StringRef(), void *OpDecl = nullptr) { auto Res = std::make_unique<X86Operand>(Register, StartLoc, EndLoc); - Res->Reg.RegNo = RegNo; + Res->Reg.RegNo = Reg; Res->AddressOf = AddressOf; Res->OffsetOfLoc = OffsetOfLoc; Res->SymName = SymName; @@ -718,11 +718,11 @@ struct X86Operand final : public MCParsedAsmOperand { void *OpDecl = nullptr, unsigned FrontendSize = 0, bool UseUpRegs = false, bool MaybeDirectBranchDest = true) { auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc); - Res->Mem.SegReg = 0; + Res->Mem.SegReg = MCRegister(); Res->Mem.Disp = Disp; - Res->Mem.BaseReg = 0; - Res->Mem.DefaultBaseReg = 0; - Res->Mem.IndexReg = 0; + Res->Mem.BaseReg = MCRegister(); + Res->Mem.DefaultBaseReg = MCRegister(); + Res->Mem.IndexReg = MCRegister(); Res->Mem.Scale = 1; Res->Mem.Size = Size; Res->Mem.ModeSize = ModeSize; @@ -737,10 +737,10 @@ struct X86Operand final : public MCParsedAsmOperand { /// Create a generalized memory operand. static std::unique_ptr<X86Operand> - CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp, - unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc, - SMLoc EndLoc, unsigned Size = 0, - unsigned DefaultBaseReg = X86::NoRegister, + CreateMem(unsigned ModeSize, MCRegister SegReg, const MCExpr *Disp, + MCRegister BaseReg, MCRegister IndexReg, unsigned Scale, + SMLoc StartLoc, SMLoc EndLoc, unsigned Size = 0, + MCRegister DefaultBaseReg = MCRegister(), StringRef SymName = StringRef(), void *OpDecl = nullptr, unsigned FrontendSize = 0, bool UseUpRegs = false, bool MaybeDirectBranchDest = true) { diff --git a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp index 2fb4991..d2ee0f1 100644 --- a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp +++ b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp @@ -1255,16 +1255,16 @@ bool X86InstructionSelector::selectExtract(MachineInstr &I, if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) { if (HasVLX) - I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr)); + I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rri)); else if (HasAVX) - I.setDesc(TII.get(X86::VEXTRACTF128rr)); + I.setDesc(TII.get(X86::VEXTRACTF128rri)); else return false; } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) { if (DstTy.getSizeInBits() == 128) - I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr)); + I.setDesc(TII.get(X86::VEXTRACTF32x4Zrri)); else if (DstTy.getSizeInBits() == 256) - I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr)); + I.setDesc(TII.get(X86::VEXTRACTF64x4Zrri)); else return false; } else @@ -1388,16 +1388,16 @@ bool X86InstructionSelector::selectInsert(MachineInstr &I, if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) { if (HasVLX) - I.setDesc(TII.get(X86::VINSERTF32x4Z256rr)); + I.setDesc(TII.get(X86::VINSERTF32x4Z256rri)); else if (HasAVX) - I.setDesc(TII.get(X86::VINSERTF128rr)); + I.setDesc(TII.get(X86::VINSERTF128rri)); else return false; } else if (DstTy.getSizeInBits() == 512 && HasAVX512) { if (InsertRegTy.getSizeInBits() == 128) - I.setDesc(TII.get(X86::VINSERTF32x4Zrr)); + I.setDesc(TII.get(X86::VINSERTF32x4Zrri)); else if (InsertRegTy.getSizeInBits() == 256) - I.setDesc(TII.get(X86::VINSERTF64x4Zrr)); + I.setDesc(TII.get(X86::VINSERTF64x4Zrri)); else return false; } else diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp index cb34b56..58b4527 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp +++ 
b/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp @@ -516,8 +516,7 @@ void X86ATTInstPrinter::printU8Imm(const MCInst *MI, unsigned Op, void X86ATTInstPrinter::printSTiRegOperand(const MCInst *MI, unsigned OpNo, raw_ostream &OS) { - const MCOperand &Op = MI->getOperand(OpNo); - unsigned Reg = Op.getReg(); + MCRegister Reg = MI->getOperand(OpNo).getReg(); // Override the default printing to print st(0) instead st. if (Reg == X86::ST0) markup(OS, Markup::Register) << "%st(0)"; diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp index 82ada25..87b46a3 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp @@ -262,7 +262,7 @@ static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII) { if (MemoryOperand < 0) return false; unsigned BaseRegNum = MemoryOperand + CurOp + X86::AddrBaseReg; - unsigned BaseReg = MI.getOperand(BaseRegNum).getReg(); + MCRegister BaseReg = MI.getOperand(BaseRegNum).getReg(); return (BaseReg == X86::RIP); } @@ -302,7 +302,7 @@ uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const { if (MemoryOperand != -1) MemoryOperand += X86II::getOperandBias(Desc); - unsigned SegmentReg = 0; + MCRegister SegmentReg; if (MemoryOperand >= 0) { // Check for explicit segment override on memory operand. SegmentReg = Inst.getOperand(MemoryOperand + X86::AddrSegmentReg).getReg(); @@ -338,7 +338,7 @@ uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const { if (MemoryOperand >= 0) { unsigned BaseRegNum = MemoryOperand + X86::AddrBaseReg; - unsigned BaseReg = Inst.getOperand(BaseRegNum).getReg(); + MCRegister BaseReg = Inst.getOperand(BaseRegNum).getReg(); if (BaseReg == X86::ESP || BaseReg == X86::EBP) return X86::SS_Encoding; } diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h index a3af9af..5694847 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h +++ b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h @@ -329,8 +329,8 @@ enum EncodingOfSegmentOverridePrefix : uint8_t { /// Given a segment register, return the encoding of the segment override /// prefix for it. inline EncodingOfSegmentOverridePrefix -getSegmentOverridePrefixForReg(unsigned Reg) { - switch (Reg) { +getSegmentOverridePrefixForReg(MCRegister Reg) { + switch (Reg.id()) { default: llvm_unreachable("Unknown segment register!"); case X86::CS: @@ -1156,52 +1156,52 @@ inline int getMemoryOperandNo(uint64_t TSFlags) { } /// \returns true if the register is a XMM. -inline bool isXMMReg(unsigned RegNo) { +inline bool isXMMReg(MCRegister Reg) { static_assert(X86::XMM15 - X86::XMM0 == 15, "XMM0-15 registers are not continuous"); static_assert(X86::XMM31 - X86::XMM16 == 15, "XMM16-31 registers are not continuous"); - return (RegNo >= X86::XMM0 && RegNo <= X86::XMM15) || - (RegNo >= X86::XMM16 && RegNo <= X86::XMM31); + return (Reg >= X86::XMM0 && Reg <= X86::XMM15) || + (Reg >= X86::XMM16 && Reg <= X86::XMM31); } /// \returns true if the register is a YMM. 
-inline bool isYMMReg(unsigned RegNo) { +inline bool isYMMReg(MCRegister Reg) { static_assert(X86::YMM15 - X86::YMM0 == 15, "YMM0-15 registers are not continuous"); static_assert(X86::YMM31 - X86::YMM16 == 15, "YMM16-31 registers are not continuous"); - return (RegNo >= X86::YMM0 && RegNo <= X86::YMM15) || - (RegNo >= X86::YMM16 && RegNo <= X86::YMM31); + return (Reg >= X86::YMM0 && Reg <= X86::YMM15) || + (Reg >= X86::YMM16 && Reg <= X86::YMM31); } /// \returns true if the register is a ZMM. -inline bool isZMMReg(unsigned RegNo) { +inline bool isZMMReg(MCRegister Reg) { static_assert(X86::ZMM31 - X86::ZMM0 == 31, "ZMM registers are not continuous"); - return RegNo >= X86::ZMM0 && RegNo <= X86::ZMM31; + return Reg >= X86::ZMM0 && Reg <= X86::ZMM31; } -/// \returns true if \p RegNo is an apx extended register. -inline bool isApxExtendedReg(unsigned RegNo) { +/// \returns true if \p Reg is an apx extended register. +inline bool isApxExtendedReg(MCRegister Reg) { static_assert(X86::R31WH - X86::R16 == 95, "EGPRs are not continuous"); - return RegNo >= X86::R16 && RegNo <= X86::R31WH; + return Reg >= X86::R16 && Reg <= X86::R31WH; } /// \returns true if the MachineOperand is a x86-64 extended (r8 or /// higher) register, e.g. r8, xmm8, xmm13, etc. -inline bool isX86_64ExtendedReg(unsigned RegNo) { - if ((RegNo >= X86::XMM8 && RegNo <= X86::XMM15) || - (RegNo >= X86::XMM16 && RegNo <= X86::XMM31) || - (RegNo >= X86::YMM8 && RegNo <= X86::YMM15) || - (RegNo >= X86::YMM16 && RegNo <= X86::YMM31) || - (RegNo >= X86::ZMM8 && RegNo <= X86::ZMM31)) +inline bool isX86_64ExtendedReg(MCRegister Reg) { + if ((Reg >= X86::XMM8 && Reg <= X86::XMM15) || + (Reg >= X86::XMM16 && Reg <= X86::XMM31) || + (Reg >= X86::YMM8 && Reg <= X86::YMM15) || + (Reg >= X86::YMM16 && Reg <= X86::YMM31) || + (Reg >= X86::ZMM8 && Reg <= X86::ZMM31)) return true; - if (isApxExtendedReg(RegNo)) + if (isApxExtendedReg(Reg)) return true; - switch (RegNo) { + switch (Reg.id()) { default: break; case X86::R8: @@ -1299,15 +1299,15 @@ inline bool canUseApxExtendedReg(const MCInstrDesc &Desc) { /// \returns true if the MemoryOperand is a 32 extended (zmm16 or higher) /// registers, e.g. zmm21, etc. -static inline bool is32ExtendedReg(unsigned RegNo) { - return ((RegNo >= X86::XMM16 && RegNo <= X86::XMM31) || - (RegNo >= X86::YMM16 && RegNo <= X86::YMM31) || - (RegNo >= X86::ZMM16 && RegNo <= X86::ZMM31)); +static inline bool is32ExtendedReg(MCRegister Reg) { + return ((Reg >= X86::XMM16 && Reg <= X86::XMM31) || + (Reg >= X86::YMM16 && Reg <= X86::YMM31) || + (Reg >= X86::ZMM16 && Reg <= X86::ZMM31)); } -inline bool isX86_64NonExtLowByteReg(unsigned reg) { - return (reg == X86::SPL || reg == X86::BPL || reg == X86::SIL || - reg == X86::DIL); +inline bool isX86_64NonExtLowByteReg(MCRegister Reg) { + return (Reg == X86::SPL || Reg == X86::BPL || Reg == X86::SIL || + Reg == X86::DIL); } /// \returns true if this is a masked instruction. @@ -1321,7 +1321,7 @@ inline bool isKMergeMasked(uint64_t TSFlags) { } /// \returns true if the instruction needs a SIB. -inline bool needSIB(unsigned BaseReg, unsigned IndexReg, bool In64BitMode) { +inline bool needSIB(MCRegister BaseReg, MCRegister IndexReg, bool In64BitMode) { // The SIB byte must be used if there is an index register. 
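// Background, assumed from the x86 encoding rules rather than from this
// patch: in the ModR/M byte, r/m = 4 is the escape meaning "a SIB byte
// follows", so any base whose low three encoding bits are 100 (ESP/RSP,
// R12, and the APX registers R20/R28) can only be expressed through a SIB
// byte, which is what the switch below spells out.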
if (IndexReg) return true; @@ -1329,7 +1329,7 @@ inline bool needSIB(unsigned BaseReg, unsigned IndexReg, bool In64BitMode) { // The SIB byte must be used if the base is ESP/RSP/R12/R20/R28, all of // which encode to an R/M value of 4, which indicates that a SIB byte is // present. - switch (BaseReg) { + switch (BaseReg.id()) { default: // If there is no base register and we're in 64-bit mode, we need a SIB // byte to emit an addr that is just 'disp32' (the non-RIP relative form). diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp index f97777f..ad7fdd7 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp @@ -329,7 +329,7 @@ bool X86::optimizeINCDEC(MCInst &MI, bool In64BitMode) { return true; } -static bool isARegister(unsigned Reg) { +static bool isARegister(MCRegister Reg) { return Reg == X86::AL || Reg == X86::AX || Reg == X86::EAX || Reg == X86::RAX; } @@ -364,7 +364,7 @@ bool X86::optimizeMOV(MCInst &MI, bool In64BitMode) { unsigned RegOp = IsStore ? 0 : 5; unsigned AddrOp = AddrBase + 3; // Check whether the destination register can be fixed. - unsigned Reg = MI.getOperand(RegOp).getReg(); + MCRegister Reg = MI.getOperand(RegOp).getReg(); if (!isARegister(Reg)) return false; // Check whether this is an absolute address. @@ -436,7 +436,7 @@ static bool optimizeToFixedRegisterForm(MCInst &MI) { FROM_TO(XOR64ri32, XOR64i32) } // Check whether the destination register can be fixed. - unsigned Reg = MI.getOperand(0).getReg(); + MCRegister Reg = MI.getOperand(0).getReg(); if (!isARegister(Reg)) return false; diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp index 9cc72d3..95038cc 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp @@ -223,14 +223,14 @@ using namespace llvm; CASE_AVX_INS_COMMON(Inst##SD4, , mr_Int) \ CASE_AVX_INS_COMMON(Inst##SS4, , mr_Int) -static unsigned getVectorRegSize(unsigned RegNo) { - if (X86II::isZMMReg(RegNo)) +static unsigned getVectorRegSize(MCRegister Reg) { + if (X86II::isZMMReg(Reg)) return 512; - if (X86II::isYMMReg(RegNo)) + if (X86II::isYMMReg(Reg)) return 256; - if (X86II::isXMMReg(RegNo)) + if (X86II::isXMMReg(Reg)) return 128; - if (X86::MM0 <= RegNo && RegNo <= X86::MM7) + if (Reg >= X86::MM0 && Reg <= X86::MM7) return 64; llvm_unreachable("Unknown vector reg!"); @@ -238,7 +238,7 @@ static unsigned getVectorRegSize(unsigned RegNo) { static unsigned getRegOperandNumElts(const MCInst *MI, unsigned ScalarSize, unsigned OperandIndex) { - unsigned OpReg = MI->getOperand(OperandIndex).getReg(); + MCRegister OpReg = MI->getOperand(OperandIndex).getReg(); return getVectorRegSize(OpReg) / ScalarSize; } @@ -703,14 +703,14 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS, DestName = getRegName(MI->getOperand(0).getReg()); break; - case X86::INSERTPSrr: - case X86::VINSERTPSrr: - case X86::VINSERTPSZrr: + case X86::INSERTPSrri: + case X86::VINSERTPSrri: + case X86::VINSERTPSZrri: Src2Name = getRegName(MI->getOperand(2).getReg()); [[fallthrough]]; - case X86::INSERTPSrm: - case X86::VINSERTPSrm: - case X86::VINSERTPSZrm: + case X86::INSERTPSrmi: + case X86::VINSERTPSrmi: + case X86::VINSERTPSZrmi: DestName = getRegName(MI->getOperand(0).getReg()); Src1Name = getRegName(MI->getOperand(1).getReg()); if (MI->getOperand(NumOperands - 
1).isImm()) @@ -1158,13 +1158,13 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS, DestName = getRegName(MI->getOperand(0).getReg()); break; - case X86::VPERM2F128rr: - case X86::VPERM2I128rr: + case X86::VPERM2F128rri: + case X86::VPERM2I128rri: Src2Name = getRegName(MI->getOperand(2).getReg()); [[fallthrough]]; - case X86::VPERM2F128rm: - case X86::VPERM2I128rm: + case X86::VPERM2F128rmi: + case X86::VPERM2I128rmi: // For instruction comments purpose, assume the 256-bit vector is v4i64. if (MI->getOperand(NumOperands - 1).isImm()) DecodeVPERM2X128Mask(4, MI->getOperand(NumOperands - 1).getImm(), diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp index 0e00b4d..cd8b9aa 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp @@ -487,8 +487,7 @@ void X86IntelInstPrinter::printU8Imm(const MCInst *MI, unsigned Op, void X86IntelInstPrinter::printSTiRegOperand(const MCInst *MI, unsigned OpNo, raw_ostream &OS) { - const MCOperand &Op = MI->getOperand(OpNo); - unsigned Reg = Op.getReg(); + MCRegister Reg = MI->getOperand(OpNo).getReg(); // Override the default printing to print st(0) instead st. if (Reg == X86::ST0) OS << "st(0)"; diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp index 469a385..1bfb080 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp @@ -190,7 +190,7 @@ public: setR(getRegEncoding(MI, OpNum)); } void setX(const MCInst &MI, unsigned OpNum, unsigned Shift = 3) { - unsigned Reg = MI.getOperand(OpNum).getReg(); + MCRegister Reg = MI.getOperand(OpNum).getReg(); // X is used to extend vector register only when shift is not 3. if (Shift != 3 && X86II::isApxExtendedReg(Reg)) return; @@ -220,7 +220,7 @@ public: } void setM(bool V) { M = V; } void setXX2(const MCInst &MI, unsigned OpNum) { - unsigned Reg = MI.getOperand(OpNum).getReg(); + MCRegister Reg = MI.getOperand(OpNum).getReg(); unsigned Encoding = MRI.getEncodingValue(Reg); setX(Encoding); // Index can be a vector register while X2 is used to extend GPR only. @@ -228,7 +228,7 @@ public: setX2(Encoding); } void setBB2(const MCInst &MI, unsigned OpNum) { - unsigned Reg = MI.getOperand(OpNum).getReg(); + MCRegister Reg = MI.getOperand(OpNum).getReg(); unsigned Encoding = MRI.getEncodingValue(Reg); setB(Encoding); // Base can be a vector register while B2 is used to extend GPR only @@ -243,7 +243,7 @@ public: // Only needed with VSIB which don't use VVVV. if (HasVEX_4V) return; - unsigned Reg = MI.getOperand(OpNum).getReg(); + MCRegister Reg = MI.getOperand(OpNum).getReg(); if (X86II::isApxExtendedReg(Reg)) return; setV2(MRI.getEncodingValue(Reg)); @@ -614,7 +614,7 @@ void X86MCCodeEmitter::emitMemModRMByte( const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg); const MCOperand &Scale = MI.getOperand(Op + X86::AddrScaleAmt); const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg); - unsigned BaseReg = Base.getReg(); + MCRegister BaseReg = Base.getReg(); // Handle %rip relative addressing. if (BaseReg == X86::RIP || @@ -746,7 +746,7 @@ void X86MCCodeEmitter::emitMemModRMByte( // This is the [REG]+disp16 case. 
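For reference alongside these emitMemModRMByte hunks, the bit packing that the modRMByte(...) calls rely on; a minimal equivalent helper mirroring the standard x86 layout (illustrative, not the emitter's own definition):

#include <cstdint>

// mod (2 bits) | reg/opcode (3 bits) | r/m (3 bits)
static inline uint8_t modRM(uint8_t Mod, uint8_t RegOpcode, uint8_t RM) {
  return uint8_t(((Mod & 3) << 6) | ((RegOpcode & 7) << 3) | (RM & 7));
}
// e.g. modRM(2, RegOpcodeField, RMfield) selects the disp16 form used above.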
emitByte(modRMByte(2, RegOpcodeField, RMfield), CB); } else { - assert(IndexReg.getReg() == 0 && "Unexpected index register!"); + assert(!IndexReg.getReg() && "Unexpected index register!"); // There is no BaseReg; this is the plain [disp16] case. emitByte(modRMByte(0, RegOpcodeField, 6), CB); } @@ -768,7 +768,7 @@ void X86MCCodeEmitter::emitMemModRMByte( // Determine whether a SIB byte is needed. if (!ForceSIB && !X86II::needSIB(BaseReg, IndexReg.getReg(), STI.hasFeature(X86::Is64Bit))) { - if (BaseReg == 0) { // [disp32] in X86-32 mode + if (!BaseReg) { // [disp32] in X86-32 mode emitByte(modRMByte(0, RegOpcodeField, 5), CB); emitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, StartByte, CB, Fixups); return; @@ -831,7 +831,7 @@ void X86MCCodeEmitter::emitMemModRMByte( bool ForceDisp32 = false; bool ForceDisp8 = false; int ImmOffset = 0; - if (BaseReg == 0) { + if (!BaseReg) { // If there is no base register, we emit the special case SIB byte with // MOD=0, BASE=5, to JUST get the index, scale, and displacement. BaseRegNo = 5; @@ -968,7 +968,7 @@ X86MCCodeEmitter::emitVEXOpcodePrefix(int MemOperand, const MCInst &MI, const MCOperand &MO = MI.getOperand(I); if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + MCRegister Reg = MO.getReg(); if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH) report_fatal_error( "Cannot encode high byte register in VEX/EVEX-prefixed instruction"); @@ -1351,7 +1351,7 @@ PrefixKind X86MCCodeEmitter::emitREXPrefix(int MemOperand, const MCInst &MI, #ifndef NDEBUG HasRegOp = true; #endif - unsigned Reg = MO.getReg(); + MCRegister Reg = MO.getReg(); if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH) UsesHighByteReg = true; // If it accesses SPL, BPL, SIL, or DIL, then it requires a REX prefix. @@ -1449,7 +1449,7 @@ PrefixKind X86MCCodeEmitter::emitREXPrefix(int MemOperand, const MCInst &MI, void X86MCCodeEmitter::emitSegmentOverridePrefix( unsigned SegOperand, const MCInst &MI, SmallVectorImpl<char> &CB) const { // Check for explicit segment override on memory operand. 
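// The one-line change below keeps the same shape as before: declaring the
// register inside the if-condition means the prefix byte is emitted only
// when the operand names a real segment register, since an invalid
// MCRegister tests false exactly as the old plain unsigned did.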
- if (unsigned Reg = MI.getOperand(SegOperand).getReg()) + if (MCRegister Reg = MI.getOperand(SegOperand).getReg()) emitByte(X86::getSegmentOverridePrefixForReg(Reg), CB); } diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp index 07c2c73..fe3c42e 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp @@ -79,8 +79,8 @@ static bool isMemOperand(const MCInst &MI, unsigned Op, unsigned RegClassID) { const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg); const MCRegisterClass &RC = X86MCRegisterClasses[RegClassID]; - return (Base.isReg() && Base.getReg() != 0 && RC.contains(Base.getReg())) || - (Index.isReg() && Index.getReg() != 0 && RC.contains(Index.getReg())); + return (Base.isReg() && Base.getReg() && RC.contains(Base.getReg())) || + (Index.isReg() && Index.getReg() && RC.contains(Index.getReg())); } bool X86_MC::is16BitMemOperand(const MCInst &MI, unsigned Op, @@ -88,8 +88,8 @@ bool X86_MC::is16BitMemOperand(const MCInst &MI, unsigned Op, const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg); const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg); - if (STI.hasFeature(X86::Is16Bit) && Base.isReg() && Base.getReg() == 0 && - Index.isReg() && Index.getReg() == 0) + if (STI.hasFeature(X86::Is16Bit) && Base.isReg() && !Base.getReg() && + Index.isReg() && !Index.getReg()) return true; return isMemOperand(MI, Op, X86::GR16RegClassID); } @@ -98,7 +98,7 @@ bool X86_MC::is32BitMemOperand(const MCInst &MI, unsigned Op) { const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg); const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg); if (Base.isReg() && Base.getReg() == X86::EIP) { - assert(Index.isReg() && Index.getReg() == 0 && "Invalid eip-based address"); + assert(Index.isReg() && !Index.getReg() && "Invalid eip-based address"); return true; } if (Index.isReg() && Index.getReg() == X86::EIZ) @@ -128,7 +128,7 @@ bool X86_MC::needsAddressSizeOverride(const MCInst &MI, default: break; case X86II::RawFrmDstSrc: { - unsigned siReg = MI.getOperand(1).getReg(); + MCRegister siReg = MI.getOperand(1).getReg(); assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) && @@ -137,12 +137,12 @@ bool X86_MC::needsAddressSizeOverride(const MCInst &MI, (Is32BitMode && siReg == X86::SI); } case X86II::RawFrmSrc: { - unsigned siReg = MI.getOperand(0).getReg(); + MCRegister siReg = MI.getOperand(0).getReg(); return (!Is32BitMode && siReg == X86::ESI) || (Is32BitMode && siReg == X86::SI); } case X86II::RawFrmDst: { - unsigned siReg = MI.getOperand(0).getReg(); + MCRegister siReg = MI.getOperand(0).getReg(); return (!Is32BitMode && siReg == X86::EDI) || (Is32BitMode && siReg == X86::DI); } @@ -666,7 +666,7 @@ std::optional<uint64_t> X86MCInstrAnalysis::evaluateMemoryOperandAddress( const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg); const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt); const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp); - if (SegReg.getReg() != 0 || IndexReg.getReg() != 0 || ScaleAmt.getImm() != 1 || + if (SegReg.getReg() || IndexReg.getReg() || ScaleAmt.getImm() != 1 || !Disp.isImm()) return std::nullopt; @@ -693,8 +693,8 @@ X86MCInstrAnalysis::getMemoryOperandRelocationOffset(const MCInst &Inst, const MCOperand &ScaleAmt = 
Inst.getOperand(MemOpStart + X86::AddrScaleAmt); const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp); // Must be a simple rip-relative address. - if (BaseReg.getReg() != X86::RIP || SegReg.getReg() != 0 || - IndexReg.getReg() != 0 || ScaleAmt.getImm() != 1 || !Disp.isImm()) + if (BaseReg.getReg() != X86::RIP || SegReg.getReg() || IndexReg.getReg() || + ScaleAmt.getImm() != 1 || !Disp.isImm()) return std::nullopt; // rip-relative ModR/M immediate is 32 bits. assert(Size > 4 && "invalid instruction size for rip-relative lea"); diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86TargetStreamer.h b/llvm/lib/Target/X86/MCTargetDesc/X86TargetStreamer.h index 11bffa0..8cfd938 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86TargetStreamer.h +++ b/llvm/lib/Target/X86/MCTargetDesc/X86TargetStreamer.h @@ -27,12 +27,12 @@ public: virtual bool emitFPOData(const MCSymbol *ProcSym, SMLoc L = {}) { return false; } - virtual bool emitFPOPushReg(unsigned Reg, SMLoc L = {}) { return false; } + virtual bool emitFPOPushReg(MCRegister Reg, SMLoc L = {}) { return false; } virtual bool emitFPOStackAlloc(unsigned StackAlloc, SMLoc L = {}) { return false; } virtual bool emitFPOStackAlign(unsigned Align, SMLoc L = {}) { return false; } - virtual bool emitFPOSetFrame(unsigned Reg, SMLoc L = {}) { return false; } + virtual bool emitFPOSetFrame(MCRegister Reg, SMLoc L = {}) { return false; } }; /// Implements X86-only null emission. diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFTargetStreamer.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFTargetStreamer.cpp index 7f8da40..678a809 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFTargetStreamer.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFTargetStreamer.cpp @@ -36,10 +36,10 @@ public: bool emitFPOEndPrologue(SMLoc L) override; bool emitFPOEndProc(SMLoc L) override; bool emitFPOData(const MCSymbol *ProcSym, SMLoc L) override; - bool emitFPOPushReg(unsigned Reg, SMLoc L) override; + bool emitFPOPushReg(MCRegister Reg, SMLoc L) override; bool emitFPOStackAlloc(unsigned StackAlloc, SMLoc L) override; bool emitFPOStackAlign(unsigned Align, SMLoc L) override; - bool emitFPOSetFrame(unsigned Reg, SMLoc L) override; + bool emitFPOSetFrame(MCRegister Reg, SMLoc L) override; }; /// Represents a single FPO directive. 
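A hedged usage sketch for the MCRegister-taking FPO hooks declared above; the streamer reference and the choice of EBP are illustrative, not taken from this patch:

#include "llvm/MC/MCRegister.h"
// Assumes X86TargetStreamer and the X86::* register enumerators are in
// scope, as declared in the headers shown in these hunks.

static void emitFPOPrologue(llvm::X86TargetStreamer &TS) {
  TS.emitFPOPushReg(llvm::X86::EBP);  // emits the .cv_fpo_pushreg directive
  TS.emitFPOSetFrame(llvm::X86::EBP); // emits the .cv_fpo_setframe directive
}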
@@ -90,10 +90,10 @@ public: bool emitFPOEndPrologue(SMLoc L) override; bool emitFPOEndProc(SMLoc L) override; bool emitFPOData(const MCSymbol *ProcSym, SMLoc L) override; - bool emitFPOPushReg(unsigned Reg, SMLoc L) override; + bool emitFPOPushReg(MCRegister Reg, SMLoc L) override; bool emitFPOStackAlloc(unsigned StackAlloc, SMLoc L) override; bool emitFPOStackAlign(unsigned Align, SMLoc L) override; - bool emitFPOSetFrame(unsigned Reg, SMLoc L) override; + bool emitFPOSetFrame(MCRegister Reg, SMLoc L) override; }; } // end namespace @@ -123,7 +123,7 @@ bool X86WinCOFFAsmTargetStreamer::emitFPOData(const MCSymbol *ProcSym, return false; } -bool X86WinCOFFAsmTargetStreamer::emitFPOPushReg(unsigned Reg, SMLoc L) { +bool X86WinCOFFAsmTargetStreamer::emitFPOPushReg(MCRegister Reg, SMLoc L) { OS << "\t.cv_fpo_pushreg\t"; InstPrinter.printRegName(OS, Reg); OS << '\n'; @@ -141,7 +141,7 @@ bool X86WinCOFFAsmTargetStreamer::emitFPOStackAlign(unsigned Align, SMLoc L) { return false; } -bool X86WinCOFFAsmTargetStreamer::emitFPOSetFrame(unsigned Reg, SMLoc L) { +bool X86WinCOFFAsmTargetStreamer::emitFPOSetFrame(MCRegister Reg, SMLoc L) { OS << "\t.cv_fpo_setframe\t"; InstPrinter.printRegName(OS, Reg); OS << '\n'; @@ -201,7 +201,7 @@ bool X86WinCOFFTargetStreamer::emitFPOEndProc(SMLoc L) { return false; } -bool X86WinCOFFTargetStreamer::emitFPOSetFrame(unsigned Reg, SMLoc L) { +bool X86WinCOFFTargetStreamer::emitFPOSetFrame(MCRegister Reg, SMLoc L) { if (checkInFPOPrologue(L)) return true; FPOInstruction Inst; @@ -212,7 +212,7 @@ bool X86WinCOFFTargetStreamer::emitFPOPushReg(unsigned Reg, SMLoc L) { -bool X86WinCOFFTargetStreamer::emitFPOPushReg(unsigned Reg, SMLoc L) { +bool X86WinCOFFTargetStreamer::emitFPOPushReg(MCRegister Reg, SMLoc L) { if (checkInFPOPrologue(L)) return true; FPOInstruction Inst; diff --git a/llvm/lib/Target/X86/X86CompressEVEX.cpp b/llvm/lib/Target/X86/X86CompressEVEX.cpp index 7343af1..a909440 100644 --- a/llvm/lib/Target/X86/X86CompressEVEX.cpp +++ b/llvm/lib/Target/X86/X86CompressEVEX.cpp @@ -138,8 +138,8 @@ static bool performCustomAdjustments(MachineInstr &MI, unsigned NewOpc) { case X86::VSHUFI32X4Z256rri: case X86::VSHUFI64X2Z256rmi: case X86::VSHUFI64X2Z256rri: { - assert((NewOpc == X86::VPERM2F128rr || NewOpc == X86::VPERM2I128rr || - NewOpc == X86::VPERM2F128rm || NewOpc == X86::VPERM2I128rm) && + assert((NewOpc == X86::VPERM2F128rri || NewOpc == X86::VPERM2I128rri || + NewOpc == X86::VPERM2F128rmi || NewOpc == X86::VPERM2I128rmi) && "Unexpected new opcode!"); MachineOperand &Imm = MI.getOperand(MI.getNumExplicitOperands() - 1); int64_t ImmVal = Imm.getImm(); diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 053b235..d73c491 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -3489,7 +3489,7 @@ unsigned X86TargetLowering::preferedOpcodeForCmpEqPiecesOfOperand( // We prefer rotate for vectors or if we won't get a zext mask with SRL // (PreferRotate will be set in the latter case). - if (PreferRotate || VT.isVector()) + if (PreferRotate || !MayTransformRotate || VT.isVector()) return ShiftOpc; // Non-vector type and we have a zext mask with SRL. 
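The TableGen hunks that follow rename instruction definitions so each suffix spells out the full operand pattern. A purely illustrative decoder of that naming grammar, not an LLVM API:

#include <string>

// r = register, m = memory, i = immediate; a trailing k marks the
// merge-masked variant and kz the zero-masked one, so "rrikz" reads as
// reg, reg, imm, zero-masked.
static std::string describeSuffix(const std::string &Suffix) {
  std::string Out;
  for (char C : Suffix) {
    if (C == 'r')      Out += "[reg]";
    else if (C == 'm') Out += "[mem]";
    else if (C == 'i') Out += "[imm]";
    else if (C == 'k') Out += "[masked]";
    else if (C == 'z') Out += "[zeroing]";
  }
  return Out; // describeSuffix("rri") == "[reg][reg][imm]"
}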
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td index 0dc023e..b6bf34a 100644 --- a/llvm/lib/Target/X86/X86InstrAVX512.td +++ b/llvm/lib/Target/X86/X86InstrAVX512.td @@ -368,7 +368,7 @@ multiclass vinsert_for_size_split<int Opcode, X86VectorVTInfo From, SDPatternOperator vinsert_for_mask, X86FoldableSchedWrite sched> { let hasSideEffects = 0, ExeDomain = To.ExeDomain in { - defm rr : AVX512_maskable_split<Opcode, MRMSrcReg, To, (outs To.RC:$dst), + defm rri : AVX512_maskable_split<Opcode, MRMSrcReg, To, (outs To.RC:$dst), (ins To.RC:$src1, From.RC:$src2, u8imm:$src3), "vinsert" # From.EltTypeName # "x" # From.NumElts, "$src3, $src2, $src1", "$src1, $src2, $src3", @@ -380,7 +380,7 @@ multiclass vinsert_for_size_split<int Opcode, X86VectorVTInfo From, (iPTR imm))>, AVX512AIi8Base, EVEX, VVVV, Sched<[sched]>; let mayLoad = 1 in - defm rm : AVX512_maskable_split<Opcode, MRMSrcMem, To, (outs To.RC:$dst), + defm rmi : AVX512_maskable_split<Opcode, MRMSrcMem, To, (outs To.RC:$dst), (ins To.RC:$src1, From.MemOp:$src2, u8imm:$src3), "vinsert" # From.EltTypeName # "x" # From.NumElts, "$src3, $src2, $src1", "$src1, $src2, $src3", @@ -408,7 +408,7 @@ multiclass vinsert_for_size_lowering<string InstrStr, X86VectorVTInfo From, let Predicates = p in { def : Pat<(vinsert_insert:$ins (To.VT To.RC:$src1), (From.VT From.RC:$src2), (iPTR imm)), - (To.VT (!cast<Instruction>(InstrStr#"rr") + (To.VT (!cast<Instruction>(InstrStr#"rri") To.RC:$src1, From.RC:$src2, (INSERT_get_vinsert_imm To.RC:$ins)))>; @@ -416,7 +416,7 @@ multiclass vinsert_for_size_lowering<string InstrStr, X86VectorVTInfo From, (To.VT To.RC:$src1), (From.VT (From.LdFrag addr:$src2)), (iPTR imm)), - (To.VT (!cast<Instruction>(InstrStr#"rm") + (To.VT (!cast<Instruction>(InstrStr#"rmi") To.RC:$src1, addr:$src2, (INSERT_get_vinsert_imm To.RC:$ins)))>; } @@ -529,7 +529,7 @@ let Predicates = p in { (From.VT From.RC:$src2), (iPTR imm))), Cast.RC:$src0)), - (!cast<Instruction>(InstrStr#"rrk") + (!cast<Instruction>(InstrStr#"rrik") Cast.RC:$src0, Cast.KRCWM:$mask, To.RC:$src1, From.RC:$src2, (INSERT_get_vinsert_imm To.RC:$ins))>; def : Pat<(Cast.VT @@ -541,7 +541,7 @@ let Predicates = p in { (From.LdFrag addr:$src2))), (iPTR imm))), Cast.RC:$src0)), - (!cast<Instruction>(InstrStr#"rmk") + (!cast<Instruction>(InstrStr#"rmik") Cast.RC:$src0, Cast.KRCWM:$mask, To.RC:$src1, addr:$src2, (INSERT_get_vinsert_imm To.RC:$ins))>; @@ -552,7 +552,7 @@ let Predicates = p in { (From.VT From.RC:$src2), (iPTR imm))), Cast.ImmAllZerosV)), - (!cast<Instruction>(InstrStr#"rrkz") + (!cast<Instruction>(InstrStr#"rrikz") Cast.KRCWM:$mask, To.RC:$src1, From.RC:$src2, (INSERT_get_vinsert_imm To.RC:$ins))>; def : Pat<(Cast.VT @@ -562,7 +562,7 @@ let Predicates = p in { (From.VT (From.LdFrag addr:$src2)), (iPTR imm))), Cast.ImmAllZerosV)), - (!cast<Instruction>(InstrStr#"rmkz") + (!cast<Instruction>(InstrStr#"rmikz") Cast.KRCWM:$mask, To.RC:$src1, addr:$src2, (INSERT_get_vinsert_imm To.RC:$ins))>; } @@ -649,12 +649,12 @@ defm : vinsert_for_mask_cast<"VINSERTI64x4Z", v32i8x_info, v64i8_info, // vinsertps - insert f32 to XMM let ExeDomain = SSEPackedSingle in { let isCommutable = 1 in -def VINSERTPSZrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst), +def VINSERTPSZrri : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, u8imm:$src3), "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, timm:$src3))]>, EVEX, VVVV, 
Sched<[SchedWriteFShuffle.XMM]>; -def VINSERTPSZrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst), +def VINSERTPSZrmi : AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst), (ins VR128X:$src1, f32mem:$src2, u8imm:$src3), "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", [(set VR128X:$dst, (X86insertps VR128X:$src1, @@ -677,7 +677,7 @@ multiclass vextract_for_size_split<int Opcode, SchedWrite SchedRR, SchedWrite SchedMR> { let hasSideEffects = 0, ExeDomain = To.ExeDomain in { - defm rr : AVX512_maskable_split<Opcode, MRMDestReg, To, (outs To.RC:$dst), + defm rri : AVX512_maskable_split<Opcode, MRMDestReg, To, (outs To.RC:$dst), (ins From.RC:$src1, u8imm:$idx), "vextract" # To.EltTypeName # "x" # To.NumElts, "$idx, $src1", "$src1, $idx", @@ -685,7 +685,7 @@ multiclass vextract_for_size_split<int Opcode, (vextract_for_mask:$idx (From.VT From.RC:$src1), (iPTR imm))>, AVX512AIi8Base, EVEX, Sched<[SchedRR]>; - def mr : AVX512AIi8<Opcode, MRMDestMem, (outs), + def mri : AVX512AIi8<Opcode, MRMDestMem, (outs), (ins To.MemOp:$dst, From.RC:$src1, u8imm:$idx), "vextract" # To.EltTypeName # "x" # To.NumElts # "\t{$idx, $src1, $dst|$dst, $src1, $idx}", @@ -695,7 +695,7 @@ multiclass vextract_for_size_split<int Opcode, Sched<[SchedMR]>; let mayStore = 1, hasSideEffects = 0 in - def mrk : AVX512AIi8<Opcode, MRMDestMem, (outs), + def mrik : AVX512AIi8<Opcode, MRMDestMem, (outs), (ins To.MemOp:$dst, To.KRCWM:$mask, From.RC:$src1, u8imm:$idx), "vextract" # To.EltTypeName # "x" # To.NumElts # @@ -718,12 +718,12 @@ multiclass vextract_for_size_lowering<string InstrStr, X86VectorVTInfo From, SDNodeXForm EXTRACT_get_vextract_imm, list<Predicate> p> { let Predicates = p in { def : Pat<(vextract_extract:$ext (From.VT From.RC:$src1), (iPTR imm)), - (To.VT (!cast<Instruction>(InstrStr#"rr") + (To.VT (!cast<Instruction>(InstrStr#"rri") From.RC:$src1, (EXTRACT_get_vextract_imm To.RC:$ext)))>; def : Pat<(store (To.VT (vextract_extract:$ext (From.VT From.RC:$src1), (iPTR imm))), addr:$dst), - (!cast<Instruction>(InstrStr#"mr") addr:$dst, From.RC:$src1, + (!cast<Instruction>(InstrStr#"mri") addr:$dst, From.RC:$src1, (EXTRACT_get_vextract_imm To.RC:$ext))>; } } @@ -828,31 +828,31 @@ defm : vextract_for_size_lowering<"VEXTRACTF64x4Z", v32bf16_info, v16bf16x_info, // smaller extract to enable EVEX->VEX. 
let Predicates = [NoVLX, HasEVEX512] in { def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 2))), - (v2i64 (VEXTRACTI128rr + (v2i64 (VEXTRACTI128rri (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm)), (iPTR 1)))>; def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 2))), - (v2f64 (VEXTRACTF128rr + (v2f64 (VEXTRACTF128rri (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm)), (iPTR 1)))>; def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 4))), - (v4i32 (VEXTRACTI128rr + (v4i32 (VEXTRACTI128rri (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm)), (iPTR 1)))>; def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 4))), - (v4f32 (VEXTRACTF128rr + (v4f32 (VEXTRACTF128rri (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm)), (iPTR 1)))>; def : Pat<(v8i16 (extract_subvector (v32i16 VR512:$src), (iPTR 8))), - (v8i16 (VEXTRACTI128rr + (v8i16 (VEXTRACTI128rri (v16i16 (EXTRACT_SUBREG (v32i16 VR512:$src), sub_ymm)), (iPTR 1)))>; def : Pat<(v8f16 (extract_subvector (v32f16 VR512:$src), (iPTR 8))), - (v8f16 (VEXTRACTF128rr + (v8f16 (VEXTRACTF128rri (v16f16 (EXTRACT_SUBREG (v32f16 VR512:$src), sub_ymm)), (iPTR 1)))>; def : Pat<(v16i8 (extract_subvector (v64i8 VR512:$src), (iPTR 16))), - (v16i8 (VEXTRACTI128rr + (v16i8 (VEXTRACTI128rri (v32i8 (EXTRACT_SUBREG (v64i8 VR512:$src), sub_ymm)), (iPTR 1)))>; } @@ -861,31 +861,31 @@ def : Pat<(v16i8 (extract_subvector (v64i8 VR512:$src), (iPTR 16))), // smaller extract to enable EVEX->VEX. let Predicates = [HasVLX] in { def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 2))), - (v2i64 (VEXTRACTI32x4Z256rr + (v2i64 (VEXTRACTI32x4Z256rri (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm)), (iPTR 1)))>; def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 2))), - (v2f64 (VEXTRACTF32x4Z256rr + (v2f64 (VEXTRACTF32x4Z256rri (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm)), (iPTR 1)))>; def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 4))), - (v4i32 (VEXTRACTI32x4Z256rr + (v4i32 (VEXTRACTI32x4Z256rri (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm)), (iPTR 1)))>; def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 4))), - (v4f32 (VEXTRACTF32x4Z256rr + (v4f32 (VEXTRACTF32x4Z256rri (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm)), (iPTR 1)))>; def : Pat<(v8i16 (extract_subvector (v32i16 VR512:$src), (iPTR 8))), - (v8i16 (VEXTRACTI32x4Z256rr + (v8i16 (VEXTRACTI32x4Z256rri (v16i16 (EXTRACT_SUBREG (v32i16 VR512:$src), sub_ymm)), (iPTR 1)))>; def : Pat<(v8f16 (extract_subvector (v32f16 VR512:$src), (iPTR 8))), - (v8f16 (VEXTRACTF32x4Z256rr + (v8f16 (VEXTRACTF32x4Z256rri (v16f16 (EXTRACT_SUBREG (v32f16 VR512:$src), sub_ymm)), (iPTR 1)))>; def : Pat<(v16i8 (extract_subvector (v64i8 VR512:$src), (iPTR 16))), - (v16i8 (VEXTRACTI32x4Z256rr + (v16i8 (VEXTRACTI32x4Z256rri (v32i8 (EXTRACT_SUBREG (v64i8 VR512:$src), sub_ymm)), (iPTR 1)))>; } @@ -904,7 +904,7 @@ let Predicates = p in { (To.VT (vextract_extract:$ext (From.VT From.RC:$src), (iPTR imm)))), To.RC:$src0)), - (Cast.VT (!cast<Instruction>(InstrStr#"rrk") + (Cast.VT (!cast<Instruction>(InstrStr#"rrik") Cast.RC:$src0, Cast.KRCWM:$mask, From.RC:$src, (EXTRACT_get_vextract_imm To.RC:$ext)))>; @@ -913,7 +913,7 @@ let Predicates = p in { (To.VT (vextract_extract:$ext (From.VT From.RC:$src), (iPTR imm)))), Cast.ImmAllZerosV)), - (Cast.VT (!cast<Instruction>(InstrStr#"rrkz") + (Cast.VT (!cast<Instruction>(InstrStr#"rrikz") Cast.KRCWM:$mask, From.RC:$src, (EXTRACT_get_vextract_imm To.RC:$ext)))>; } @@ -998,13 +998,13 @@ defm : 
vextract_for_mask_cast<"VEXTRACTI64x4Z", v64i8_info, v32i8x_info,
                       EXTRACT_get_vextract256_imm, [HasAVX512]>;

// vextractps - extract 32 bits from XMM
-def VEXTRACTPSZrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32orGR64:$dst),
+def VEXTRACTPSZrri : AVX512AIi8<0x17, MRMDestReg, (outs GR32orGR64:$dst),
                      (ins VR128X:$src1, u8imm:$src2),
                      "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR32orGR64:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
                      EVEX, WIG, Sched<[WriteVecExtract]>;

-def VEXTRACTPSZmr : AVX512AIi8<0x17, MRMDestMem, (outs),
+def VEXTRACTPSZmri : AVX512AIi8<0x17, MRMDestMem, (outs),
                      (ins f32mem:$dst, VR128X:$src1, u8imm:$src2),
                      "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
@@ -11432,7 +11432,7 @@ defm VPUNPCKHQDQ : avx512_binop_rm_vl_q<0x6D, "vpunpckhqdq", X86Unpckh,
multiclass avx512_extract_elt_bw_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                   X86VectorVTInfo _> {
-  def mr : AVX512Ii8<opc, MRMDestMem, (outs),
+  def mri : AVX512Ii8<opc, MRMDestMem, (outs),
              (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
              OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
              [(store (_.EltVT (trunc (OpNode (_.VT _.RC:$src1), timm:$src2))),
@@ -11442,7 +11442,7 @@ multiclass avx512_extract_elt_bw_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass avx512_extract_elt_b<string OpcodeStr, X86VectorVTInfo _> {
  let Predicates = [HasBWI] in {
-    def rr : AVX512Ii8<0x14, MRMDestReg, (outs GR32orGR64:$dst),
+    def rri : AVX512Ii8<0x14, MRMDestReg, (outs GR32orGR64:$dst),
               (ins _.RC:$src1, u8imm:$src2),
               OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               [(set GR32orGR64:$dst,
@@ -11455,7 +11455,7 @@ multiclass avx512_extract_elt_b<string OpcodeStr, X86VectorVTInfo _> {
multiclass avx512_extract_elt_w<string OpcodeStr, X86VectorVTInfo _> {
  let Predicates = [HasBWI] in {
-    def rr : AVX512Ii8<0xC5, MRMSrcReg, (outs GR32orGR64:$dst),
+    def rri : AVX512Ii8<0xC5, MRMSrcReg, (outs GR32orGR64:$dst),
               (ins _.RC:$src1, u8imm:$src2),
               OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               [(set GR32orGR64:$dst,
@@ -11463,7 +11463,7 @@ multiclass avx512_extract_elt_w<string OpcodeStr, X86VectorVTInfo _> {
               EVEX, TB, PD, Sched<[WriteVecExtract]>;

    let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in
-    def rr_REV : AVX512Ii8<0x15, MRMDestReg, (outs GR32orGR64:$dst),
+    def rri_REV : AVX512Ii8<0x15, MRMDestReg, (outs GR32orGR64:$dst),
                  (ins _.RC:$src1, u8imm:$src2),
                  OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                  EVEX, TA, PD, Sched<[WriteVecExtract]>;
@@ -11475,14 +11475,14 @@ multiclass avx512_extract_elt_w<string OpcodeStr, X86VectorVTInfo _> {
multiclass avx512_extract_elt_dq<string OpcodeStr, X86VectorVTInfo _,
                                 RegisterClass GRC> {
  let Predicates = [HasDQI] in {
-    def rr : AVX512Ii8<0x16, MRMDestReg, (outs GRC:$dst),
+    def rri : AVX512Ii8<0x16, MRMDestReg, (outs GRC:$dst),
               (ins _.RC:$src1, u8imm:$src2),
               OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               [(set GRC:$dst,
                     (extractelt (_.VT _.RC:$src1), imm:$src2))]>,
               EVEX, TA, PD, Sched<[WriteVecExtract]>;

-    def mr : AVX512Ii8<0x16, MRMDestMem, (outs),
+    def mri : AVX512Ii8<0x16, MRMDestMem, (outs),
               (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
               OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               [(store (extractelt (_.VT _.RC:$src1),
@@ -11500,7 +11500,7 @@ defm VPEXTRQZ : avx512_extract_elt_dq<"vpextrq", v2i64x_info, GR64>, REX_W;
multiclass avx512_insert_elt_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
                               X86VectorVTInfo _,
                               PatFrag LdFrag, SDPatternOperator immoperator> {
-  def rm : AVX512Ii8<opc, MRMSrcMem, (outs _.RC:$dst),
+  def rmi : AVX512Ii8<opc, MRMSrcMem, (outs _.RC:$dst),
              (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
              OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
              [(set _.RC:$dst,
@@ -11511,7 +11511,7 @@ multiclass avx512_insert_elt_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass avx512_insert_elt_bw<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                X86VectorVTInfo _, PatFrag LdFrag> {
  let Predicates = [HasBWI] in {
-    def rr : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
+    def rri : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
               (ins _.RC:$src1, GR32orGR64:$src2, u8imm:$src3),
               OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
               [(set _.RC:$dst,
@@ -11525,7 +11525,7 @@ multiclass avx512_insert_elt_bw<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass avx512_insert_elt_dq<bits<8> opc, string OpcodeStr,
                                X86VectorVTInfo _, RegisterClass GRC> {
  let Predicates = [HasDQI] in {
-    def rr : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
+    def rri : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
               (ins _.RC:$src1, GRC:$src2, u8imm:$src3),
               OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
               [(set _.RC:$dst,
@@ -11548,27 +11548,27 @@ let Predicates = [HasAVX512, NoBWI] in {
  def : Pat<(X86pinsrb VR128:$src1,
                       (i32 (anyext (i8 (bitconvert v8i1:$src2)))),
                       timm:$src3),
-            (VPINSRBrr VR128:$src1, (i32 (COPY_TO_REGCLASS VK8:$src2, GR32)),
-                       timm:$src3)>;
+            (VPINSRBrri VR128:$src1, (i32 (COPY_TO_REGCLASS VK8:$src2, GR32)),
+                        timm:$src3)>;
}

let Predicates = [HasBWI] in {
  def : Pat<(X86pinsrb VR128:$src1, (i32 (anyext (i8 GR8:$src2))), timm:$src3),
-            (VPINSRBZrr VR128:$src1, (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
-                                     GR8:$src2, sub_8bit), timm:$src3)>;
+            (VPINSRBZrri VR128:$src1, (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
+                                      GR8:$src2, sub_8bit), timm:$src3)>;
  def : Pat<(X86pinsrb VR128:$src1,
                       (i32 (anyext (i8 (bitconvert v8i1:$src2)))),
                       timm:$src3),
-            (VPINSRBZrr VR128:$src1, (i32 (COPY_TO_REGCLASS VK8:$src2, GR32)),
-                        timm:$src3)>;
+            (VPINSRBZrri VR128:$src1, (i32 (COPY_TO_REGCLASS VK8:$src2, GR32)),
+                         timm:$src3)>;
}

// Always select FP16 instructions if available.
let Predicates = [HasBWI], AddedComplexity = -10 in {
-  def : Pat<(f16 (load addr:$src)), (COPY_TO_REGCLASS (VPINSRWZrm (v8i16 (IMPLICIT_DEF)), addr:$src, 0), FR16X)>;
-  def : Pat<(store f16:$src, addr:$dst), (VPEXTRWZmr addr:$dst, (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0)>;
-  def : Pat<(i16 (bitconvert f16:$src)), (EXTRACT_SUBREG (VPEXTRWZrr (v8i16 (COPY_TO_REGCLASS FR16X:$src, VR128X)), 0), sub_16bit)>;
-  def : Pat<(f16 (bitconvert i16:$src)), (COPY_TO_REGCLASS (VPINSRWZrr (v8i16 (IMPLICIT_DEF)), (INSERT_SUBREG (IMPLICIT_DEF), GR16:$src, sub_16bit), 0), FR16X)>;
+  def : Pat<(f16 (load addr:$src)), (COPY_TO_REGCLASS (VPINSRWZrmi (v8i16 (IMPLICIT_DEF)), addr:$src, 0), FR16X)>;
+  def : Pat<(store f16:$src, addr:$dst), (VPEXTRWZmri addr:$dst, (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0)>;
+  def : Pat<(i16 (bitconvert f16:$src)), (EXTRACT_SUBREG (VPEXTRWZrri (v8i16 (COPY_TO_REGCLASS FR16X:$src, VR128X)), 0), sub_16bit)>;
+  def : Pat<(f16 (bitconvert i16:$src)), (COPY_TO_REGCLASS (VPINSRWZrri (v8i16 (IMPLICIT_DEF)), (INSERT_SUBREG (IMPLICIT_DEF), GR16:$src, sub_16bit), 0), FR16X)>;
}

//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 401b8ce..a74da000 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -2424,9 +2424,9 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
    WorkingMI->getOperand(3).setImm(Mask ^ Imm);
    break;
  }
-  case X86::INSERTPSrr:
-  case X86::VINSERTPSrr:
-  case X86::VINSERTPSZrr: {
+  case X86::INSERTPSrri:
+  case X86::VINSERTPSrri:
+  case X86::VINSERTPSZrri: {
    unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
    unsigned ZMask = Imm & 15;
    unsigned DstIdx = (Imm >> 4) & 3;
@@ -2597,8 +2597,8 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
        .setImm(X86::getSwappedVCMPImm(
            MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 0x1f));
    break;
-  case X86::VPERM2F128rr:
-  case X86::VPERM2I128rr:
+  case X86::VPERM2F128rri:
+  case X86::VPERM2I128rri:
    // Flip permute source immediate.
    // Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi.
    // Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi.
@@ -6258,16 +6258,16 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
                           get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
  case X86::VMOVAPSZ128mr_NOVLX:
    return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr),
-                           get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
+                           get(X86::VEXTRACTF32x4Zmri), X86::sub_xmm);
  case X86::VMOVUPSZ128mr_NOVLX:
    return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr),
-                           get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
+                           get(X86::VEXTRACTF32x4Zmri), X86::sub_xmm);
  case X86::VMOVAPSZ256mr_NOVLX:
    return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr),
-                           get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
+                           get(X86::VEXTRACTF64x4Zmri), X86::sub_ymm);
  case X86::VMOVUPSZ256mr_NOVLX:
    return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr),
-                           get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
+                           get(X86::VEXTRACTF64x4Zmri), X86::sub_ymm);
  case X86::MOV32ri64: {
    Register Reg = MIB.getReg(0);
    Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit);
@@ -6775,8 +6775,8 @@ static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum,
  case X86::VPACKUSWBZ128rr:
  case X86::VPACKSSDWZ128rr:
  case X86::VPACKUSDWZ128rr:
-  case X86::VPERM2F128rr:
-  case X86::VPERM2I128rr:
+  case X86::VPERM2F128rri:
+  case X86::VPERM2I128rri:
  case X86::VSHUFF32X4Z256rri:
  case X86::VSHUFF32X4Zrri:
  case X86::VSHUFF64X2Z256rri:
@@ -7274,9 +7274,9 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
    ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
    unsigned Size, Align Alignment) const {
  switch (MI.getOpcode()) {
-  case X86::INSERTPSrr:
-  case X86::VINSERTPSrr:
-  case X86::VINSERTPSZrr:
+  case X86::INSERTPSrri:
+  case X86::VINSERTPSrri:
+  case X86::VINSERTPSZrri:
    // Attempt to convert the load of inserted vector into a fold load
    // of a single float.
    if (OpNum == 2) {
@@ -7289,13 +7289,13 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
        const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
        unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
        if ((Size == 0 || Size >= 16) && RCSize >= 16 &&
-           (MI.getOpcode() != X86::INSERTPSrr || Alignment >= Align(4))) {
+           (MI.getOpcode() != X86::INSERTPSrri || Alignment >= Align(4))) {
          int PtrOffset = SrcIdx * 4;
          unsigned NewImm = (DstIdx << 4) | ZMask;
          unsigned NewOpCode =
-              (MI.getOpcode() == X86::VINSERTPSZrr) ? X86::VINSERTPSZrm
-              : (MI.getOpcode() == X86::VINSERTPSrr) ? X86::VINSERTPSrm
-              : X86::INSERTPSrm;
+              (MI.getOpcode() == X86::VINSERTPSZrri) ? X86::VINSERTPSZrmi
+              : (MI.getOpcode() == X86::VINSERTPSrri) ? X86::VINSERTPSrmi
+              : X86::INSERTPSrmi;
          MachineInstr *NewMI =
              fuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
          NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm);
diff --git a/llvm/lib/Target/X86/X86InstrMMX.td b/llvm/lib/Target/X86/X86InstrMMX.td
index 60dfe66..644d6d0 100644
--- a/llvm/lib/Target/X86/X86InstrMMX.td
+++ b/llvm/lib/Target/X86/X86InstrMMX.td
@@ -509,7 +509,7 @@ let Constraints = "$src1 = $dst" in {
// Extract / Insert
let Predicates = [HasMMX, HasSSE1] in
-def MMX_PEXTRWrr: MMXIi8<0xC5, MRMSrcReg,
+def MMX_PEXTRWrri : MMXIi8<0xC5, MRMSrcReg,
                         (outs GR32orGR64:$dst), (ins VR64:$src1, i32u8imm:$src2),
                         "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                         [(set GR32orGR64:$dst, (int_x86_mmx_pextr_w VR64:$src1,
@@ -517,7 +517,7 @@ def MMX_PEXTRWrr: MMXIi8<0xC5, MRMSrcReg,
                         Sched<[WriteVecExtract]>;
let Constraints = "$src1 = $dst" in {
let Predicates = [HasMMX, HasSSE1] in {
-  def MMX_PINSRWrr : MMXIi8<0xC4, MRMSrcReg,
+  def MMX_PINSRWrri : MMXIi8<0xC4, MRMSrcReg,
                     (outs VR64:$dst),
                     (ins VR64:$src1, GR32orGR64:$src2, i32u8imm:$src3),
                     "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
@@ -525,7 +525,7 @@ let Predicates = [HasMMX, HasSSE1] in {
                                              GR32orGR64:$src2, timm:$src3))]>,
                     Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;

-  def MMX_PINSRWrm : MMXIi8<0xC4, MRMSrcMem,
+  def MMX_PINSRWrmi : MMXIi8<0xC4, MRMSrcMem,
                    (outs VR64:$dst),
                    (ins VR64:$src1, i16mem:$src2, i32u8imm:$src3),
                    "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 4e5f2e3..d51125a 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -3989,7 +3989,7 @@ let Constraints = "$src1 = $dst" in {
let ExeDomain = SSEPackedInt in {
multiclass sse2_pinsrw<bit Is2Addr = 1> {
-  def rr : Ii8<0xC4, MRMSrcReg,
+  def rri : Ii8<0xC4, MRMSrcReg,
       (outs VR128:$dst), (ins VR128:$src1, GR32orGR64:$src2, u8imm:$src3),
       !if(Is2Addr,
@@ -3998,7 +3998,7 @@ multiclass sse2_pinsrw<bit Is2Addr = 1> {
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, GR32orGR64:$src2, timm:$src3))]>,
       Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
-  def rm : Ii8<0xC4, MRMSrcMem,
+  def rmi : Ii8<0xC4, MRMSrcMem,
               (outs VR128:$dst), (ins VR128:$src1, i16mem:$src2, u8imm:$src3),
               !if(Is2Addr,
@@ -4012,13 +4012,13 @@ multiclass sse2_pinsrw<bit Is2Addr = 1> {
// Extract
let Predicates = [HasAVX, NoBWI] in
-def VPEXTRWrr : Ii8<0xC5, MRMSrcReg,
+def VPEXTRWrri : Ii8<0xC5, MRMSrcReg,
                   (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
                   "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                   [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                     timm:$src2))]>,
                   TB, PD, VEX, WIG, Sched<[WriteVecExtract]>;
-def PEXTRWrr : PDIi8<0xC5, MRMSrcReg,
+def PEXTRWrri : PDIi8<0xC5, MRMSrcReg,
              (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
              "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
              [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
@@ -4036,16 +4036,16 @@ defm PINSRW : sse2_pinsrw, TB, PD;

// Always select FP16 instructions if available.
let Predicates = [UseSSE2], AddedComplexity = -10 in {
-  def : Pat<(f16 (load addr:$src)), (COPY_TO_REGCLASS (PINSRWrm (v8i16 (IMPLICIT_DEF)), addr:$src, 0), FR16)>;
-  def : Pat<(store f16:$src, addr:$dst), (MOV16mr addr:$dst, (EXTRACT_SUBREG (PEXTRWrr (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0), sub_16bit))>;
-  def : Pat<(i16 (bitconvert f16:$src)), (EXTRACT_SUBREG (PEXTRWrr (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0), sub_16bit)>;
-  def : Pat<(f16 (bitconvert i16:$src)), (COPY_TO_REGCLASS (PINSRWrr (v8i16 (IMPLICIT_DEF)), (INSERT_SUBREG (IMPLICIT_DEF), GR16:$src, sub_16bit), 0), FR16)>;
+  def : Pat<(f16 (load addr:$src)), (COPY_TO_REGCLASS (PINSRWrmi (v8i16 (IMPLICIT_DEF)), addr:$src, 0), FR16)>;
+  def : Pat<(store f16:$src, addr:$dst), (MOV16mr addr:$dst, (EXTRACT_SUBREG (PEXTRWrri (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0), sub_16bit))>;
+  def : Pat<(i16 (bitconvert f16:$src)), (EXTRACT_SUBREG (PEXTRWrri (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0), sub_16bit)>;
+  def : Pat<(f16 (bitconvert i16:$src)), (COPY_TO_REGCLASS (PINSRWrri (v8i16 (IMPLICIT_DEF)), (INSERT_SUBREG (IMPLICIT_DEF), GR16:$src, sub_16bit), 0), FR16)>;
}

let Predicates = [HasAVX, NoBWI] in {
-  def : Pat<(f16 (load addr:$src)), (COPY_TO_REGCLASS (VPINSRWrm (v8i16 (IMPLICIT_DEF)), addr:$src, 0), FR16)>;
-  def : Pat<(i16 (bitconvert f16:$src)), (EXTRACT_SUBREG (VPEXTRWrr (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0), sub_16bit)>;
-  def : Pat<(f16 (bitconvert i16:$src)), (COPY_TO_REGCLASS (VPINSRWrr (v8i16 (IMPLICIT_DEF)), (INSERT_SUBREG (IMPLICIT_DEF), GR16:$src, sub_16bit), 0), FR16)>;
+  def : Pat<(f16 (load addr:$src)), (COPY_TO_REGCLASS (VPINSRWrmi (v8i16 (IMPLICIT_DEF)), addr:$src, 0), FR16)>;
+  def : Pat<(i16 (bitconvert f16:$src)), (EXTRACT_SUBREG (VPEXTRWrri (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0), sub_16bit)>;
+  def : Pat<(f16 (bitconvert i16:$src)), (COPY_TO_REGCLASS (VPINSRWrri (v8i16 (IMPLICIT_DEF)), (INSERT_SUBREG (IMPLICIT_DEF), GR16:$src, sub_16bit), 0), FR16)>;
}

//===---------------------------------------------------------------------===//
@@ -5234,7 +5234,7 @@ let Predicates = [UseSSE41] in {
/// SS41I_binop_ext8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
-  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
+  def rri : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
                  (ins VR128:$src1, u8imm:$src2),
                  !strconcat(OpcodeStr,
                             "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -5242,7 +5242,7 @@ multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
                           timm:$src2))]>,
                  Sched<[WriteVecExtract]>;
  let hasSideEffects = 0, mayStore = 1 in
-  def mr : SS4AIi8<opc, MRMDestMem, (outs),
+  def mri : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i8mem:$dst, VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -5259,14 +5259,14 @@ defm PEXTRB : SS41I_extract8<0x14, "pextrb">;

/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
-  def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
+  def rri_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
                  (ins VR128:$src1, u8imm:$src2),
                  !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
                  Sched<[WriteVecExtract]>;
  let hasSideEffects = 0, mayStore = 1 in
-  def mr : SS4AIi8<opc, MRMDestMem, (outs),
+  def mri : SS4AIi8<opc, MRMDestMem, (outs),
                (ins i16mem:$dst, VR128:$src1,
                 u8imm:$src2),
                !strconcat(OpcodeStr,
                           "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -5280,22 +5280,22 @@ let Predicates = [HasAVX, NoBWI] in
defm PEXTRW : SS41I_extract16<0x15, "pextrw">;

let Predicates = [UseSSE41] in
-  def : Pat<(store f16:$src, addr:$dst), (PEXTRWmr addr:$dst, (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0)>;
+  def : Pat<(store f16:$src, addr:$dst), (PEXTRWmri addr:$dst, (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0)>;

let Predicates = [HasAVX, NoBWI] in
-  def : Pat<(store f16:$src, addr:$dst), (VPEXTRWmr addr:$dst, (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0)>;
+  def : Pat<(store f16:$src, addr:$dst), (VPEXTRWmri addr:$dst, (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0)>;

/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
-  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
+  def rri : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                  (ins VR128:$src1, u8imm:$src2),
                  !strconcat(OpcodeStr,
                             "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set GR32:$dst,
                    (extractelt (v4i32 VR128:$src1), imm:$src2))]>,
                  Sched<[WriteVecExtract]>;
-  def mr : SS4AIi8<opc, MRMDestMem, (outs),
+  def mri : SS4AIi8<opc, MRMDestMem, (outs),
                (ins i32mem:$dst, VR128:$src1, u8imm:$src2),
                !strconcat(OpcodeStr,
                           "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -5310,14 +5310,14 @@ defm PEXTRD : SS41I_extract32<0x16, "pextrd">;

/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
-  def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
+  def rri : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
                  (ins VR128:$src1, u8imm:$src2),
                  !strconcat(OpcodeStr,
                             "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set GR64:$dst,
                    (extractelt (v2i64 VR128:$src1), imm:$src2))]>,
                  Sched<[WriteVecExtract]>;
-  def mr : SS4AIi8<opc, MRMDestMem, (outs),
+  def mri : SS4AIi8<opc, MRMDestMem, (outs),
                (ins i64mem:$dst, VR128:$src1, u8imm:$src2),
                !strconcat(OpcodeStr,
                           "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -5333,19 +5333,19 @@ defm PEXTRQ : SS41I_extract64<0x16, "pextrq">, REX_W;

/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
/// destination
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
-  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
-                 (ins VR128:$src1, u8imm:$src2),
-                 !strconcat(OpcodeStr,
-                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
-                 [(set GR32orGR64:$dst,
-                    (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
-                 Sched<[WriteVecExtract]>;
-  def mr : SS4AIi8<opc, MRMDestMem, (outs),
-                 (ins f32mem:$dst, VR128:$src1, u8imm:$src2),
-                 !strconcat(OpcodeStr,
-                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
-                 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
-                          addr:$dst)]>, Sched<[WriteVecExtractSt]>;
+  def rri : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
+                    (ins VR128:$src1, u8imm:$src2),
+                    !strconcat(OpcodeStr,
+                               "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+                    [(set GR32orGR64:$dst,
+                       (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
+                    Sched<[WriteVecExtract]>;
+  def mri : SS4AIi8<opc, MRMDestMem, (outs),
+                    (ins f32mem:$dst, VR128:$src1, u8imm:$src2),
+                    !strconcat(OpcodeStr,
+                               "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+                    [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
+                             addr:$dst)]>, Sched<[WriteVecExtractSt]>;
}

let ExeDomain = SSEPackedSingle in {
@@ -5359,7 +5359,7 @@ let ExeDomain = SSEPackedSingle in {
//===----------------------------------------------------------------------===//

multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
-  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+  def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32orGR64:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5368,7 +5368,7 @@ multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, GR32orGR64:$src2, timm:$src3))]>,
      Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
-  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+  def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i8mem:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5382,15 +5382,15 @@ multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
let Predicates = [HasAVX, NoBWI] in {
  defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX, VVVV, WIG;
  def : Pat<(X86pinsrb VR128:$src1, (i32 (anyext (i8 GR8:$src2))), timm:$src3),
-            (VPINSRBrr VR128:$src1, (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
-                                    GR8:$src2, sub_8bit), timm:$src3)>;
+            (VPINSRBrri VR128:$src1, (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
+                                     GR8:$src2, sub_8bit), timm:$src3)>;
}

let Constraints = "$src1 = $dst" in
  defm PINSRB : SS41I_insert8<0x20, "pinsrb">;

multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
-  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+  def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5399,7 +5399,7 @@ multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
      Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
-  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+  def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i32mem:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5416,7 +5416,7 @@ let Constraints = "$src1 = $dst" in
  defm PINSRD : SS41I_insert32<0x22, "pinsrd">;

multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
-  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+  def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR64:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5425,7 +5425,7 @@ multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
      Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
-  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+  def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i64mem:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5447,7 +5447,7 @@ let Constraints = "$src1 = $dst" in
// in the target vector.
multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
  let isCommutable = 1 in
-  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+  def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5456,7 +5456,7 @@ multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
      [(set VR128:$dst,
        (X86insertps VR128:$src1, VR128:$src2, timm:$src3))]>,
      Sched<[SchedWriteFShuffle.XMM]>;
-  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+  def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, f32mem:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -7164,11 +7164,11 @@ let Predicates = [HasAVXNECONVERT, NoVLX] in
let ExeDomain = SSEPackedSingle in {
let isCommutable = 1 in
-def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
+def VPERM2F128rri : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, u8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
          VEX, VVVV, VEX_L, Sched<[WriteFShuffle256]>;
-def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
+def VPERM2F128rmi : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
          VEX, VVVV, VEX_L, Sched<[WriteFShuffle256.Folded, WriteFShuffle256.ReadAfterFold]>;
@@ -7181,12 +7181,12 @@ def Perm2XCommuteImm : SDNodeXForm<timm, [{
multiclass vperm2x128_lowering<string InstrStr, ValueType VT, PatFrag memop_frag> {
  def : Pat<(VT (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 timm:$imm))),
-            (!cast<Instruction>(InstrStr#rr) VR256:$src1, VR256:$src2, timm:$imm)>;
+            (!cast<Instruction>(InstrStr#rri) VR256:$src1, VR256:$src2, timm:$imm)>;
  def : Pat<(VT (X86VPerm2x128 VR256:$src1, (memop_frag addr:$src2), (i8 timm:$imm))),
-            (!cast<Instruction>(InstrStr#rm) VR256:$src1, addr:$src2, timm:$imm)>;
+            (!cast<Instruction>(InstrStr#rmi) VR256:$src1, addr:$src2, timm:$imm)>;
  // Pattern with load in other operand.
  def : Pat<(VT (X86VPerm2x128 (memop_frag addr:$src2), VR256:$src1, (i8 timm:$imm))),
-            (!cast<Instruction>(InstrStr#rm) VR256:$src1, addr:$src2,
+            (!cast<Instruction>(InstrStr#rmi) VR256:$src1, addr:$src2,
             (Perm2XCommuteImm timm:$imm))>;
}
@@ -7207,12 +7207,12 @@ let Predicates = [HasAVX1Only] in {
// VINSERTF128 - Insert packed floating-point values
//
let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
-def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
+def VINSERTF128rri : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, u8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
          Sched<[WriteFShuffle256]>, VEX, VVVV, VEX_L;
let mayLoad = 1 in
-def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
+def VINSERTF128rmi : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, u8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
          Sched<[WriteFShuffle256.Folded, WriteFShuffle256.ReadAfterFold]>, VEX, VVVV, VEX_L;
@@ -7230,18 +7230,18 @@ multiclass vinsert_lowering<string InstrStr, string PermStr,
                            PatFrag frommemop_frag, PatFrag tomemop_frag> {
  def : Pat<(vinsert128_insert:$ins (To VR256:$src1), (From VR128:$src2),
                                    (iPTR imm)),
-            (!cast<Instruction>(InstrStr#rr) VR256:$src1, VR128:$src2,
+            (!cast<Instruction>(InstrStr#rri) VR256:$src1, VR128:$src2,
            (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (To VR256:$src1),
                                    (From (frommemop_frag addr:$src2)),
                                    (iPTR imm)),
-            (!cast<Instruction>(InstrStr#rm) VR256:$src1, addr:$src2,
+            (!cast<Instruction>(InstrStr#rmi) VR256:$src1, addr:$src2,
            (INSERT_get_vinsert128_imm VR256:$ins))>;
  // Folding "To" vector - convert to perm2x128 and commute inputs.
  def : Pat<(vinsert128_insert:$ins (To (tomemop_frag addr:$src1)),
                                    (From VR128:$src2),
                                    (iPTR imm)),
-            (!cast<Instruction>(PermStr#rm)
+            (!cast<Instruction>(PermStr#rmi)
              (INSERT_SUBREG (To (IMPLICIT_DEF)), VR128:$src2, sub_xmm),
              addr:$src1, (INSERT_get_vperm2x128_commutedimm VR256:$ins))>;
}
@@ -7264,12 +7264,12 @@ let Predicates = [HasAVX1Only] in {
// VEXTRACTF128 - Extract packed floating-point values
//
let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
-def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
+def VEXTRACTF128rri : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, u8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
          Sched<[WriteFShuffle256]>, VEX, VEX_L;
let mayStore = 1 in
-def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
+def VEXTRACTF128mri : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, u8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
          Sched<[WriteFStoreX]>, VEX, VEX_L;
@@ -7277,12 +7277,12 @@ def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
multiclass vextract_lowering<string InstrStr, ValueType From, ValueType To> {
  def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
-            (To (!cast<Instruction>(InstrStr#rr)
+            (To (!cast<Instruction>(InstrStr#rri)
                 (From VR256:$src1),
                 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
  def : Pat<(store (To (vextract128_extract:$ext (From VR256:$src1),
                        (iPTR imm))), addr:$dst),
-            (!cast<Instruction>(InstrStr#mr) addr:$dst, VR256:$src1,
+            (!cast<Instruction>(InstrStr#mri) addr:$dst, VR256:$src1,
             (EXTRACT_get_vextract128_imm VR128:$ext))>;
}
@@ -7766,30 +7766,30 @@ let Predicates = [HasAVX1Only] in {
  def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
            (VPERMILPSri (v4f32 (COPY_TO_REGCLASS
                                FR32:$src, VR128)), 0)>;
  def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
-            (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
+            (VINSERTF128rri (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
                            (v4f32 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)), sub_xmm),
                            (v4f32 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)), 1)>;
  def : Pat<(v8f32 (X86VBroadcast v4f32:$src)),
-            (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
+            (VINSERTF128rri (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
                            (v4f32 (VPERMILPSri VR128:$src, 0)), sub_xmm),
                            (v4f32 (VPERMILPSri VR128:$src, 0)), 1)>;
  def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
-            (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
+            (VINSERTF128rri (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
                            (v2f64 (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))), sub_xmm),
                            (v2f64 (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))), 1)>;
  def : Pat<(v4f64 (X86VBroadcast v2f64:$src)),
-            (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
+            (VINSERTF128rri (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
                            (v2f64 (VMOVDDUPrr VR128:$src)), sub_xmm),
                            (v2f64 (VMOVDDUPrr VR128:$src)), 1)>;

  def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
            (VPSHUFDri (VMOVDI2PDIrr GR32:$src), 0)>;
  def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
-            (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
+            (VINSERTF128rri (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                            (v4i32 (VPSHUFDri (VMOVDI2PDIrr GR32:$src), 0)), sub_xmm),
                            (v4i32 (VPSHUFDri (VMOVDI2PDIrr GR32:$src), 0)), 1)>;
  def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
-            (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
+            (VINSERTF128rri (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
                            (v4i32 (VPSHUFDri (VMOV64toPQIrr GR64:$src), 0x44)), sub_xmm),
                            (v4i32 (VPSHUFDri (VMOV64toPQIrr GR64:$src), 0x44)), 1)>;
@@ -7799,7 +7799,7 @@ let Predicates = [HasAVX1Only] in {
            (VMOVDDUPrm addr:$src)>;

  def : Pat<(v4i64 (X86VBroadcast v2i64:$src)),
-            (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
+            (VINSERTF128rri (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
                            (v2i64 (VPSHUFDri VR128:$src, 0x44)), sub_xmm),
                            (v2i64 (VPSHUFDri VR128:$src, 0x44)), 1)>;
}
@@ -7866,11 +7866,11 @@ defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", loadv4f64, v4f64,
// VPERM2I128 - Permute Integer vector Values in 128-bit chunks
//
let isCommutable = 1 in
-def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
+def VPERM2I128rri : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, u8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
          Sched<[WriteShuffle256]>, VEX, VVVV, VEX_L;
-def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
+def VPERM2I128rmi : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
          Sched<[WriteShuffle256.Folded, WriteShuffle256.ReadAfterFold]>, VEX, VVVV, VEX_L;
@@ -7888,12 +7888,12 @@ let Predicates = [HasAVX2] in {
// VINSERTI128 - Insert packed integer values
//
let hasSideEffects = 0 in {
-def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
+def VINSERTI128rri : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, u8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
          Sched<[WriteShuffle256]>, VEX, VVVV, VEX_L;
let mayLoad = 1 in
-def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
+def VINSERTI128rmi : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, i128mem:$src2, u8imm:$src3),
"vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>, Sched<[WriteShuffle256.Folded, WriteShuffle256.ReadAfterFold]>, VEX, VVVV, VEX_L; @@ -7914,12 +7914,12 @@ let Predicates = [HasAVXNECONVERT, NoVLX] in //===----------------------------------------------------------------------===// // VEXTRACTI128 - Extract packed integer values // -def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst), +def VEXTRACTI128rri : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst), (ins VR256:$src1, u8imm:$src2), "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, Sched<[WriteShuffle256]>, VEX, VEX_L; let hasSideEffects = 0, mayStore = 1 in -def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs), +def VEXTRACTI128mri : AVX2AIi8<0x39, MRMDestMem, (outs), (ins i128mem:$dst, VR256:$src1, u8imm:$src2), "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, Sched<[SchedWriteVecMoveLS.XMM.MR]>, VEX, VEX_L; diff --git a/llvm/lib/Target/X86/X86ReplaceableInstrs.def b/llvm/lib/Target/X86/X86ReplaceableInstrs.def index e138319..9deb7a8 100644 --- a/llvm/lib/Target/X86/X86ReplaceableInstrs.def +++ b/llvm/lib/Target/X86/X86ReplaceableInstrs.def @@ -42,8 +42,8 @@ ENTRY(UNPCKLPSrm, UNPCKLPSrm, PUNPCKLDQrm) ENTRY(UNPCKLPSrr, UNPCKLPSrr, PUNPCKLDQrr) ENTRY(UNPCKHPSrm, UNPCKHPSrm, PUNPCKHDQrm) ENTRY(UNPCKHPSrr, UNPCKHPSrr, PUNPCKHDQrr) -ENTRY(EXTRACTPSmr, EXTRACTPSmr, PEXTRDmr) -ENTRY(EXTRACTPSrr, EXTRACTPSrr, PEXTRDrr) +ENTRY(EXTRACTPSmri, EXTRACTPSmri, PEXTRDmri) +ENTRY(EXTRACTPSrri, EXTRACTPSrri, PEXTRDrri) // AVX 128-bit support ENTRY(VMOVAPSmr, VMOVAPDmr, VMOVDQAmr) ENTRY(VMOVAPSrm, VMOVAPDrm, VMOVDQArm) @@ -74,8 +74,8 @@ ENTRY(VUNPCKLPSrm, VUNPCKLPSrm, VPUNPCKLDQrm) ENTRY(VUNPCKLPSrr, VUNPCKLPSrr, VPUNPCKLDQrr) ENTRY(VUNPCKHPSrm, VUNPCKHPSrm, VPUNPCKHDQrm) ENTRY(VUNPCKHPSrr, VUNPCKHPSrr, VPUNPCKHDQrr) -ENTRY(VEXTRACTPSmr, VEXTRACTPSmr, VPEXTRDmr) -ENTRY(VEXTRACTPSrr, VEXTRACTPSrr, VPEXTRDrr) +ENTRY(VEXTRACTPSmri, VEXTRACTPSmri, VPEXTRDmri) +ENTRY(VEXTRACTPSrri, VEXTRACTPSrri, VPEXTRDrri) // AVX 256-bit support ENTRY(VMOVAPSYmr, VMOVAPDYmr, VMOVDQAYmr) ENTRY(VMOVAPSYrm, VMOVAPDYrm, VMOVDQAYrm) @@ -110,30 +110,30 @@ ENTRY(VBROADCASTSDZ256rr, VBROADCASTSDZ256rr, VPBROADCASTQZ256rr) ENTRY(VBROADCASTSDZ256rm, VBROADCASTSDZ256rm, VPBROADCASTQZ256rm) ENTRY(VBROADCASTSDZrr, VBROADCASTSDZrr, VPBROADCASTQZrr) ENTRY(VBROADCASTSDZrm, VBROADCASTSDZrm, VPBROADCASTQZrm) -ENTRY(VINSERTF32x4Zrr, VINSERTF32x4Zrr, VINSERTI32x4Zrr) -ENTRY(VINSERTF32x4Zrm, VINSERTF32x4Zrm, VINSERTI32x4Zrm) -ENTRY(VINSERTF32x8Zrr, VINSERTF32x8Zrr, VINSERTI32x8Zrr) -ENTRY(VINSERTF32x8Zrm, VINSERTF32x8Zrm, VINSERTI32x8Zrm) -ENTRY(VINSERTF64x2Zrr, VINSERTF64x2Zrr, VINSERTI64x2Zrr) -ENTRY(VINSERTF64x2Zrm, VINSERTF64x2Zrm, VINSERTI64x2Zrm) -ENTRY(VINSERTF64x4Zrr, VINSERTF64x4Zrr, VINSERTI64x4Zrr) -ENTRY(VINSERTF64x4Zrm, VINSERTF64x4Zrm, VINSERTI64x4Zrm) -ENTRY(VINSERTF32x4Z256rr, VINSERTF32x4Z256rr, VINSERTI32x4Z256rr) -ENTRY(VINSERTF32x4Z256rm, VINSERTF32x4Z256rm, VINSERTI32x4Z256rm) -ENTRY(VINSERTF64x2Z256rr, VINSERTF64x2Z256rr, VINSERTI64x2Z256rr) -ENTRY(VINSERTF64x2Z256rm, VINSERTF64x2Z256rm, VINSERTI64x2Z256rm) -ENTRY(VEXTRACTF32x4Zrr, VEXTRACTF32x4Zrr, VEXTRACTI32x4Zrr) -ENTRY(VEXTRACTF32x4Zmr, VEXTRACTF32x4Zmr, VEXTRACTI32x4Zmr) -ENTRY(VEXTRACTF32x8Zrr, VEXTRACTF32x8Zrr, VEXTRACTI32x8Zrr) -ENTRY(VEXTRACTF32x8Zmr, VEXTRACTF32x8Zmr, VEXTRACTI32x8Zmr) -ENTRY(VEXTRACTF64x2Zrr, VEXTRACTF64x2Zrr, VEXTRACTI64x2Zrr) -ENTRY(VEXTRACTF64x2Zmr, VEXTRACTF64x2Zmr, VEXTRACTI64x2Zmr) -ENTRY(VEXTRACTF64x4Zrr, VEXTRACTF64x4Zrr, 
-ENTRY(VEXTRACTF64x4Zrr, VEXTRACTF64x4Zrr, VEXTRACTI64x4Zrr)
-ENTRY(VEXTRACTF64x4Zmr, VEXTRACTF64x4Zmr, VEXTRACTI64x4Zmr)
-ENTRY(VEXTRACTF32x4Z256rr, VEXTRACTF32x4Z256rr, VEXTRACTI32x4Z256rr)
-ENTRY(VEXTRACTF32x4Z256mr, VEXTRACTF32x4Z256mr, VEXTRACTI32x4Z256mr)
-ENTRY(VEXTRACTF64x2Z256rr, VEXTRACTF64x2Z256rr, VEXTRACTI64x2Z256rr)
-ENTRY(VEXTRACTF64x2Z256mr, VEXTRACTF64x2Z256mr, VEXTRACTI64x2Z256mr)
+ENTRY(VINSERTF32x4Zrri, VINSERTF32x4Zrri, VINSERTI32x4Zrri)
+ENTRY(VINSERTF32x4Zrmi, VINSERTF32x4Zrmi, VINSERTI32x4Zrmi)
+ENTRY(VINSERTF32x8Zrri, VINSERTF32x8Zrri, VINSERTI32x8Zrri)
+ENTRY(VINSERTF32x8Zrmi, VINSERTF32x8Zrmi, VINSERTI32x8Zrmi)
+ENTRY(VINSERTF64x2Zrri, VINSERTF64x2Zrri, VINSERTI64x2Zrri)
+ENTRY(VINSERTF64x2Zrmi, VINSERTF64x2Zrmi, VINSERTI64x2Zrmi)
+ENTRY(VINSERTF64x4Zrri, VINSERTF64x4Zrri, VINSERTI64x4Zrri)
+ENTRY(VINSERTF64x4Zrmi, VINSERTF64x4Zrmi, VINSERTI64x4Zrmi)
+ENTRY(VINSERTF32x4Z256rri, VINSERTF32x4Z256rri, VINSERTI32x4Z256rri)
+ENTRY(VINSERTF32x4Z256rmi, VINSERTF32x4Z256rmi, VINSERTI32x4Z256rmi)
+ENTRY(VINSERTF64x2Z256rri, VINSERTF64x2Z256rri, VINSERTI64x2Z256rri)
+ENTRY(VINSERTF64x2Z256rmi, VINSERTF64x2Z256rmi, VINSERTI64x2Z256rmi)
+ENTRY(VEXTRACTF32x4Zrri, VEXTRACTF32x4Zrri, VEXTRACTI32x4Zrri)
+ENTRY(VEXTRACTF32x4Zmri, VEXTRACTF32x4Zmri, VEXTRACTI32x4Zmri)
+ENTRY(VEXTRACTF32x8Zrri, VEXTRACTF32x8Zrri, VEXTRACTI32x8Zrri)
+ENTRY(VEXTRACTF32x8Zmri, VEXTRACTF32x8Zmri, VEXTRACTI32x8Zmri)
+ENTRY(VEXTRACTF64x2Zrri, VEXTRACTF64x2Zrri, VEXTRACTI64x2Zrri)
+ENTRY(VEXTRACTF64x2Zmri, VEXTRACTF64x2Zmri, VEXTRACTI64x2Zmri)
+ENTRY(VEXTRACTF64x4Zrri, VEXTRACTF64x4Zrri, VEXTRACTI64x4Zrri)
+ENTRY(VEXTRACTF64x4Zmri, VEXTRACTF64x4Zmri, VEXTRACTI64x4Zmri)
+ENTRY(VEXTRACTF32x4Z256rri, VEXTRACTF32x4Z256rri, VEXTRACTI32x4Z256rri)
+ENTRY(VEXTRACTF32x4Z256mri, VEXTRACTF32x4Z256mri, VEXTRACTI32x4Z256mri)
+ENTRY(VEXTRACTF64x2Z256rri, VEXTRACTF64x2Z256rri, VEXTRACTI64x2Z256rri)
+ENTRY(VEXTRACTF64x2Z256mri, VEXTRACTF64x2Z256mri, VEXTRACTI64x2Z256mri)
ENTRY(VPERMILPSmi, VPERMILPSmi, VPSHUFDmi)
ENTRY(VPERMILPSri, VPERMILPSri, VPSHUFDri)
ENTRY(VPERMILPSZ128mi, VPERMILPSZ128mi, VPSHUFDZ128mi)
@@ -178,8 +178,8 @@ ENTRY(VUNPCKLPSZrm, VUNPCKLPSZrm, VPUNPCKLDQZrm)
ENTRY(VUNPCKLPSZrr, VUNPCKLPSZrr, VPUNPCKLDQZrr)
ENTRY(VUNPCKHPSZrm, VUNPCKHPSZrm, VPUNPCKHDQZrm)
ENTRY(VUNPCKHPSZrr, VUNPCKHPSZrr, VPUNPCKHDQZrr)
-ENTRY(VEXTRACTPSZmr, VEXTRACTPSZmr, VPEXTRDZmr)
-ENTRY(VEXTRACTPSZrr, VEXTRACTPSZrr, VPEXTRDZrr)
+ENTRY(VEXTRACTPSZmri, VEXTRACTPSZmri, VPEXTRDZmri)
+ENTRY(VEXTRACTPSZrri, VEXTRACTPSZrri, VPEXTRDZrri)
};

static const uint16_t ReplaceableInstrsAVX2[][3] = {
@@ -192,8 +192,8 @@ ENTRY(VORPSYrm, VORPDYrm, VPORYrm)
ENTRY(VORPSYrr, VORPDYrr, VPORYrr)
ENTRY(VXORPSYrm, VXORPDYrm, VPXORYrm)
ENTRY(VXORPSYrr, VXORPDYrr, VPXORYrr)
-ENTRY(VPERM2F128rm, VPERM2F128rm, VPERM2I128rm)
-ENTRY(VPERM2F128rr, VPERM2F128rr, VPERM2I128rr)
+ENTRY(VPERM2F128rmi, VPERM2F128rmi, VPERM2I128rmi)
+ENTRY(VPERM2F128rri, VPERM2F128rri, VPERM2I128rri)
ENTRY(VBROADCASTSSrm, VBROADCASTSSrm, VPBROADCASTDrm)
ENTRY(VBROADCASTSSrr, VBROADCASTSSrr, VPBROADCASTDrr)
ENTRY(VMOVDDUPrm, VMOVDDUPrm, VPBROADCASTQrm)
@@ -232,10 +232,10 @@ ENTRY(VMOVHPSZ128mr, VMOVHPDZ128mr, INSTRUCTION_LIST_END)

static const uint16_t ReplaceableInstrsAVX2InsertExtract[][3] = {
// PackedSingle, PackedDouble, PackedInt
-ENTRY(VEXTRACTF128mr, VEXTRACTF128mr, VEXTRACTI128mr)
-ENTRY(VEXTRACTF128rr, VEXTRACTF128rr, VEXTRACTI128rr)
-ENTRY(VINSERTF128rm, VINSERTF128rm, VINSERTI128rm)
-ENTRY(VINSERTF128rr, VINSERTF128rr, VINSERTI128rr)
+ENTRY(VEXTRACTF128mri, VEXTRACTF128mri, VEXTRACTI128mri)
+ENTRY(VEXTRACTF128rri, VEXTRACTF128rri, VEXTRACTI128rri)
+ENTRY(VINSERTF128rmi, VINSERTF128rmi, VINSERTI128rmi)
+ENTRY(VINSERTF128rri, VINSERTF128rri, VINSERTI128rri)
};

// NOTE: These should only be used by the custom domain methods.
diff --git a/llvm/lib/Target/X86/X86SchedAlderlakeP.td b/llvm/lib/Target/X86/X86SchedAlderlakeP.td
index 7756cd5..aec6906 100644
--- a/llvm/lib/Target/X86/X86SchedAlderlakeP.td
+++ b/llvm/lib/Target/X86/X86SchedAlderlakeP.td
@@ -886,15 +886,15 @@ def ADLPWriteResGroup50 : SchedWriteRes<[ADLPPort04_09, ADLPPort05, ADLPPort07_0
  let Latency = 12;
  let NumMicroOps = 3;
}
-def : InstRW<[ADLPWriteResGroup50], (instregex "^(V?)EXTRACTPSmr$")>;
+def : InstRW<[ADLPWriteResGroup50], (instregex "^(V?)EXTRACTPSmri$")>;
def : InstRW<[ADLPWriteResGroup50], (instrs SMSW16m)>;

def ADLPWriteResGroup51 : SchedWriteRes<[ADLPPort00, ADLPPort05]> {
  let Latency = 4;
  let NumMicroOps = 2;
}
-def : InstRW<[ADLPWriteResGroup51], (instregex "^(V?)EXTRACTPSrr$")>;
-def : InstRW<[ADLPWriteResGroup51], (instrs MMX_PEXTRWrr)>;
+def : InstRW<[ADLPWriteResGroup51], (instregex "^(V?)EXTRACTPSrri$")>;
+def : InstRW<[ADLPWriteResGroup51], (instrs MMX_PEXTRWrri)>;

def ADLPWriteResGroup52 : SchedWriteRes<[ADLPPort00_01_05_06, ADLPPort02_03, ADLPPort02_03_07, ADLPPort04, ADLPPort06]> {
  let Latency = 7;
@@ -1367,7 +1367,7 @@ def ADLPWriteResGroup121 : SchedWriteRes<[ADLPPort05]> {
}
def : InstRW<[ADLPWriteResGroup121], (instregex "^MMX_PACKSS(DW|WB)rr$")>;
def : InstRW<[ADLPWriteResGroup121], (instrs MMX_PACKUSWBrr)>;
-def : InstRW<[ADLPWriteResGroup121, ReadDefault, ReadInt2Fpu], (instrs MMX_PINSRWrr)>;
+def : InstRW<[ADLPWriteResGroup121, ReadDefault, ReadInt2Fpu], (instrs MMX_PINSRWrri)>;

def ADLPWriteResGroup122 : SchedWriteRes<[ADLPPort00_05, ADLPPort02_03_11]> {
  let Latency = 9;
@@ -1394,7 +1394,7 @@ def ADLPWriteResGroup125 : SchedWriteRes<[ADLPPort02_03_11, ADLPPort05]> {
  let NumMicroOps = 2;
}
def : InstRW<[ADLPWriteResGroup125], (instregex "^VPBROADCAST(B|W)Yrm$")>;
-def : InstRW<[ADLPWriteResGroup125, ReadAfterLd], (instrs MMX_PINSRWrm)>;
+def : InstRW<[ADLPWriteResGroup125, ReadAfterLd], (instrs MMX_PINSRWrmi)>;
def : InstRW<[ADLPWriteResGroup125, ReadAfterVecYLd], (instrs VPALIGNRYrmi)>;

def ADLPWriteResGroup126 : SchedWriteRes<[ADLPPort00_01_05_06_10, ADLPPort02_03_11]> {
@@ -1721,7 +1721,7 @@ def ADLPWriteResGroup176 : SchedWriteRes<[ADLPPort01_05, ADLPPort04_09, ADLPPort
  let Latency = 12;
  let NumMicroOps = 3;
}
-def : InstRW<[ADLPWriteResGroup176], (instregex "^(V?)PEXTR(D|Q)mr$")>;
+def : InstRW<[ADLPWriteResGroup176], (instregex "^(V?)PEXTR(D|Q)mri$")>;

def ADLPWriteResGroup177 : SchedWriteRes<[ADLPPort00_01, ADLPPort01_05, ADLPPort02_03_11]> {
  let ReleaseAtCycles = [1, 2, 1];
@@ -2250,7 +2250,7 @@ def ADLPWriteResGroup255 : SchedWriteRes<[ADLPPort00_01_05, ADLPPort02_03_11]> {
  let Latency = 9;
  let NumMicroOps = 2;
}
-def : InstRW<[ADLPWriteResGroup255, ReadAfterVecYLd], (instregex "^VINSERT(F|I)128rm$",
+def : InstRW<[ADLPWriteResGroup255, ReadAfterVecYLd], (instregex "^VINSERT(F|I)128rmi$",
                                                                 "^VP(ADD|SUB)(B|D|Q|W)Yrm$")>;

def ADLPWriteResGroup256 : SchedWriteRes<[ADLPPort00, ADLPPort00_06, ADLPPort02_03_11]> {
diff --git a/llvm/lib/Target/X86/X86SchedBroadwell.td b/llvm/lib/Target/X86/X86SchedBroadwell.td
index 697d30a..699ca91 100644
--- a/llvm/lib/Target/X86/X86SchedBroadwell.td
+++ b/llvm/lib/Target/X86/X86SchedBroadwell.td
@@ -986,8 +986,8 @@ def BWWriteResGroup65 : SchedWriteRes<[BWPort23,BWPort015]> {
  let NumMicroOps = 2;
  let ReleaseAtCycles = [1,1];
}
-def: InstRW<[BWWriteResGroup65], (instrs VINSERTF128rm,
-                                         VINSERTI128rm,
+def: InstRW<[BWWriteResGroup65], (instrs VINSERTF128rmi,
+                                         VINSERTI128rmi,
                                          VPBLENDDrmi)>;

def BWWriteResGroup66 : SchedWriteRes<[BWPort23,BWPort0156]> {
diff --git a/llvm/lib/Target/X86/X86SchedHaswell.td b/llvm/lib/Target/X86/X86SchedHaswell.td
index c4d2ad7..b820418 100644
--- a/llvm/lib/Target/X86/X86SchedHaswell.td
+++ b/llvm/lib/Target/X86/X86SchedHaswell.td
@@ -1028,8 +1028,8 @@ def HWWriteResGroup17 : SchedWriteRes<[HWPort23,HWPort015]> {
  let NumMicroOps = 2;
  let ReleaseAtCycles = [1,1];
}
-def: InstRW<[HWWriteResGroup17], (instrs VINSERTF128rm,
-                                         VINSERTI128rm,
+def: InstRW<[HWWriteResGroup17], (instrs VINSERTF128rmi,
+                                         VINSERTI128rmi,
                                          VPBLENDDrmi)>;

def HWWriteResGroup17_2 : SchedWriteRes<[HWPort23,HWPort015]> {
diff --git a/llvm/lib/Target/X86/X86SchedIceLake.td b/llvm/lib/Target/X86/X86SchedIceLake.td
index 8c0fb11..b32db53 100644
--- a/llvm/lib/Target/X86/X86SchedIceLake.td
+++ b/llvm/lib/Target/X86/X86SchedIceLake.td
@@ -884,7 +884,7 @@ def ICXWriteResGroup36 : SchedWriteRes<[ICXPort0,ICXPort5]> {
  let NumMicroOps = 2;
  let ReleaseAtCycles = [1,1];
}
-def: InstRW<[ICXWriteResGroup36], (instregex "(V?)EXTRACTPS(Z?)rr")>;
+def: InstRW<[ICXWriteResGroup36], (instregex "(V?)EXTRACTPS(Z?)rri")>;

def ICXWriteResGroup37 : SchedWriteRes<[ICXPort0,ICXPort5]> {
  let Latency = 3;
@@ -1034,7 +1034,7 @@ def ICXWriteResGroup53 : SchedWriteRes<[ICXPort49,ICXPort5,ICXPort78]> {
  let NumMicroOps = 3;
  let ReleaseAtCycles = [1,1,1];
}
-def: InstRW<[ICXWriteResGroup53], (instregex "(V?)EXTRACTPS(Z?)mr")>;
+def: InstRW<[ICXWriteResGroup53], (instregex "(V?)EXTRACTPS(Z?)mri")>;

def ICXWriteResGroup54 : SchedWriteRes<[ICXPort49,ICXPort5,ICXPort78]> {
  let Latency = 4;
diff --git a/llvm/lib/Target/X86/X86SchedSandyBridge.td b/llvm/lib/Target/X86/X86SchedSandyBridge.td
index 6966400..7be9f51 100644
--- a/llvm/lib/Target/X86/X86SchedSandyBridge.td
+++ b/llvm/lib/Target/X86/X86SchedSandyBridge.td
@@ -686,7 +686,7 @@ def SBWriteResGroup22 : SchedWriteRes<[SBPort0,SBPort5]> {
  let NumMicroOps = 2;
  let ReleaseAtCycles = [1,1];
}
-def: InstRW<[SBWriteResGroup22], (instregex "(V?)EXTRACTPSrr")>;
+def: InstRW<[SBWriteResGroup22], (instregex "(V?)EXTRACTPSrri")>;

def SBWriteResGroup23 : SchedWriteRes<[SBPort05,SBPort015]> {
  let Latency = 2;
@@ -789,7 +789,7 @@ def SBWriteResGroup36 : SchedWriteRes<[SBPort4,SBPort5,SBPort23]> {
}
def: InstRW<[SBWriteResGroup36], (instrs CALL64pcrel32)>;
def: InstRW<[SBWriteResGroup36], (instregex "CALL(16|32|64)r",
-                                            "(V?)EXTRACTPSmr")>;
+                                            "(V?)EXTRACTPSmri")>;

def SBWriteResGroup40 : SchedWriteRes<[SBPort4,SBPort23,SBPort015]> {
  let Latency = 5;
@@ -892,7 +892,7 @@ def SBWriteResGroup58 : SchedWriteRes<[SBPort23,SBPort05]> {
  let NumMicroOps = 2;
  let ReleaseAtCycles = [1,1];
}
-def: InstRW<[SBWriteResGroup58], (instrs VINSERTF128rm)>;
+def: InstRW<[SBWriteResGroup58], (instrs VINSERTF128rmi)>;

def SBWriteResGroup59 : SchedWriteRes<[SBPort23,SBPort15]> {
  let Latency = 7;
diff --git a/llvm/lib/Target/X86/X86SchedSapphireRapids.td b/llvm/lib/Target/X86/X86SchedSapphireRapids.td
index ff3fe32..1fb3c75 100644
--- a/llvm/lib/Target/X86/X86SchedSapphireRapids.td
+++ b/llvm/lib/Target/X86/X86SchedSapphireRapids.td
@@ -1004,18 +1004,18 @@ def SPRWriteResGroup54 : SchedWriteRes<[SPRPort04_09, SPRPort05, SPRPort07_08]>
  let Latency = 12;
  let NumMicroOps = 3;
}
-def : InstRW<[SPRWriteResGroup54], (instregex "^(V?)EXTRACTPSmr$",
+def : InstRW<[SPRWriteResGroup54], (instregex "^(V?)EXTRACTPSmri$",
                                              "^VPMOVQDZ((256)?)mr$")>;
def : InstRW<[SPRWriteResGroup54], (instrs SMSW16m,
-                                          VEXTRACTPSZmr)>;
+                                          VEXTRACTPSZmri)>;

def SPRWriteResGroup55 : SchedWriteRes<[SPRPort00, SPRPort05]> {
  let Latency = 4;
  let NumMicroOps = 2;
}
-def : InstRW<[SPRWriteResGroup55], (instregex "^(V?)EXTRACTPSrr$")>;
-def : InstRW<[SPRWriteResGroup55], (instrs MMX_PEXTRWrr,
-                                           VEXTRACTPSZrr,
+def : InstRW<[SPRWriteResGroup55], (instregex "^(V?)EXTRACTPSrri$")>;
+def : InstRW<[SPRWriteResGroup55], (instrs MMX_PEXTRWrri,
+                                           VEXTRACTPSZrri,
                                            VPERMWZrr)>;

def SPRWriteResGroup56 : SchedWriteRes<[SPRPort02_03, SPRPort02_03_11, SPRPort04, SPRPort04_09, SPRPort06]> {
@@ -1646,7 +1646,7 @@ def : InstRW<[SPRWriteResGroup130], (instregex "^MMX_PACKSS(DW|WB)rr$",
                                               "^VPMOV(U?)SQDZrrk(z?)$",
                                               "^VPMOVUS(Q|W)BZrr$")>;
def : InstRW<[SPRWriteResGroup130], (instrs MMX_PACKUSWBrr)>;
-def : InstRW<[SPRWriteResGroup130, ReadDefault, ReadInt2Fpu], (instrs MMX_PINSRWrr)>;
+def : InstRW<[SPRWriteResGroup130, ReadDefault, ReadInt2Fpu], (instrs MMX_PINSRWrri)>;

def SPRWriteResGroup131 : SchedWriteRes<[SPRPort00_05, SPRPort02_03_11]> {
  let Latency = 9;
@@ -1661,8 +1661,8 @@ def : InstRW<[SPRWriteResGroup131], (instregex "^VBROADCAST(F|I)32X(8|2Z)rmk(z?)
                                               "^VMOVDQ(A|U)(32|64)Zrmk(z?)$",
                                               "^VPBROADCAST(D|Q)Zrmk(z?)$")>;
def : InstRW<[SPRWriteResGroup131, ReadAfterVecLd], (instregex "^MMX_P(ADD|SUB)(B|D|Q|W)rm$")>;
-def : InstRW<[SPRWriteResGroup131, ReadAfterVecYLd], (instregex "^VINSERT(F|I)(32|64)x4Zrm((k|kz)?)$",
-                                                                "^VINSERT(F|I)(32x8|64x2)Zrm((k|kz)?)$",
+def : InstRW<[SPRWriteResGroup131, ReadAfterVecYLd], (instregex "^VINSERT(F|I)(32|64)x4Zrmi((k|kz)?)$",
+                                                                "^VINSERT(F|I)(32x8|64x2)Zrmi((k|kz)?)$",
                                                                "^VP(ADD|SUB)(B|D|Q|W)Zrm$",
                                                                "^VP(ADD|SUB)(D|Q)Zrm(b|k|kz)$",
                                                                "^VP(ADD|SUB)(D|Q)Zrmbk(z?)$",
@@ -1691,7 +1691,7 @@ def SPRWriteResGroup134 : SchedWriteRes<[SPRPort02_03_11, SPRPort05]> {
def : InstRW<[SPRWriteResGroup134], (instregex "^VPBROADCAST(BY|WZ)rm$",
                                              "^VPBROADCAST(B|W)Z256rm$",
                                              "^VPBROADCAST(BZ|WY)rm$")>;
-def : InstRW<[SPRWriteResGroup134, ReadAfterLd], (instrs MMX_PINSRWrm)>;
+def : InstRW<[SPRWriteResGroup134, ReadAfterLd], (instrs MMX_PINSRWrmi)>;
def : InstRW<[SPRWriteResGroup134, ReadAfterVecXLd], (instregex "^VFPCLASSP(D|S)Z128rm$")>;
def : InstRW<[SPRWriteResGroup134, ReadAfterVecLd], (instregex "^VFPCLASSS(D|H|S)Zrm$")>;
def : InstRW<[SPRWriteResGroup134, ReadAfterVecYLd], (instregex "^VPALIGNR(Y|Z256)rmi$")>;
@@ -2050,8 +2050,8 @@ def SPRWriteResGroup182 : SchedWriteRes<[SPRPort01_05, SPRPort04_09, SPRPort07_0
  let Latency = 12;
  let NumMicroOps = 3;
}
-def : InstRW<[SPRWriteResGroup182], (instregex "^(V?)PEXTR(D|Q)mr$",
-                                               "^VPEXTR(D|Q)Zmr$",
+def : InstRW<[SPRWriteResGroup182], (instregex "^(V?)PEXTR(D|Q)mri$",
+                                               "^VPEXTR(D|Q)Zmri$",
                                               "^VPMOVQDZ128mr(k?)$")>;

def SPRWriteResGroup183 : SchedWriteRes<[SPRPort00_01, SPRPort01_05, SPRPort02_03_11]> {
@@ -2704,8 +2704,8 @@ def : InstRW<[SPRWriteResGroup262], (instregex "^VBROADCAST(F|I)32X(2|4)Z256rmk(
                                               "^VMOV(D|SH|SL)DUPZ256rmk(z?)$",
                                               "^VMOVDQ(A|U)(32|64)Z256rmk(z?)$",
                                               "^VPBROADCAST(D|Q)Z256rmk(z?)$")>;
-def : InstRW<[SPRWriteResGroup262, ReadAfterVecYLd], (instregex "^VINSERT(F|I)128rm$",
-                                                                "^VINSERT(F|I)(32x4|64x2)Z256rm((k|kz)?)$",
+def : InstRW<[SPRWriteResGroup262, ReadAfterVecYLd], (instregex "^VINSERT(F|I)128rmi$",
+                                                                "^VINSERT(F|I)(32x4|64x2)Z256rmi((k|kz)?)$",
                                                                "^VP(ADD|SUB)(B|D|Q|W)(Y|Z256)rm$",
                                                                "^VP(ADD|SUB)(D|Q)Z256rm(b|k|kz)$",
                                                                "^VP(ADD|SUB)(D|Q)Z256rmbk(z?)$",
diff --git a/llvm/lib/Target/X86/X86SchedSkylakeClient.td b/llvm/lib/Target/X86/X86SchedSkylakeClient.td
index 14da1ac..116aa35 100644
--- a/llvm/lib/Target/X86/X86SchedSkylakeClient.td
+++ b/llvm/lib/Target/X86/X86SchedSkylakeClient.td
@@ -1095,8 +1095,8 @@ def SKLWriteResGroup91 : SchedWriteRes<[SKLPort23,SKLPort015]> {
  let NumMicroOps = 2;
  let ReleaseAtCycles = [1,1];
}
-def: InstRW<[SKLWriteResGroup91], (instrs VINSERTF128rm,
-                                          VINSERTI128rm,
+def: InstRW<[SKLWriteResGroup91], (instrs VINSERTF128rmi,
+                                          VINSERTI128rmi,
                                           VPBLENDDrmi)>;
def: InstRW<[SKLWriteResGroup91, ReadAfterVecXLd],
            (instregex "(V?)PADD(B|D|Q|W)rm",
diff --git a/llvm/lib/Target/X86/X86SchedSkylakeServer.td b/llvm/lib/Target/X86/X86SchedSkylakeServer.td
index 0ecfc30..649d38d 100644
--- a/llvm/lib/Target/X86/X86SchedSkylakeServer.td
+++ b/llvm/lib/Target/X86/X86SchedSkylakeServer.td
@@ -1343,7 +1343,7 @@ def: InstRW<[SKXWriteResGroup95, ReadAfterVecXLd],
                       "VBLENDMPSZ128rm(b?)",
                       "VBROADCASTI32X2Z128rm(b?)",
                       "VBROADCASTSSZ128rm(b?)",
-                       "VINSERT(F|I)128rm",
+                       "VINSERT(F|I)128rmi",
                       "VMOVAPDZ128rm(b?)",
                       "VMOVAPSZ128rm(b?)",
                       "VMOVDDUPZ128rm(b?)",
diff --git a/llvm/lib/Target/X86/X86ScheduleBdVer2.td b/llvm/lib/Target/X86/X86ScheduleBdVer2.td
index 296504c..e5cc563 100644
--- a/llvm/lib/Target/X86/X86ScheduleBdVer2.td
+++ b/llvm/lib/Target/X86/X86ScheduleBdVer2.td
@@ -954,28 +954,28 @@ def PdWriteVEXTRACTF128rr : SchedWriteRes<[PdFPU01, PdFPFMA]> {
  let Latency = 2;
  let ReleaseAtCycles = [1, 2];
}
-def : InstRW<[PdWriteVEXTRACTF128rr], (instrs VEXTRACTF128rr)>;
+def : InstRW<[PdWriteVEXTRACTF128rr], (instrs VEXTRACTF128rri)>;

def PdWriteVEXTRACTF128mr : SchedWriteRes<[PdFPU01, PdFPFMA]> {
  let Latency = 7;
  let ReleaseAtCycles = [1, 4];
  let NumMicroOps = 2;
}
-def : InstRW<[PdWriteVEXTRACTF128mr], (instrs VEXTRACTF128mr)>;
+def : InstRW<[PdWriteVEXTRACTF128mr], (instrs VEXTRACTF128mri)>;

def PdWriteVPERM2F128rr : SchedWriteRes<[PdFPU01, PdFPFMA]> {
  let Latency = 4;
  let ReleaseAtCycles = [1, 6];
  let NumMicroOps = 8;
}
-def : InstRW<[PdWriteVPERM2F128rr], (instrs VPERM2F128rr)>;
+def : InstRW<[PdWriteVPERM2F128rr], (instrs VPERM2F128rri)>;

def PdWriteVPERM2F128rm : SchedWriteRes<[PdFPU01, PdFPFMA]> {
  let Latency = 8; // 4 + 4
  let ReleaseAtCycles = [1, 8];
  let NumMicroOps = 10;
}
-def : InstRW<[PdWriteVPERM2F128rm], (instrs VPERM2F128rm)>;
+def : InstRW<[PdWriteVPERM2F128rm], (instrs VPERM2F128rmi)>;

////////////////////////////////////////////////////////////////////////////////
// Conversions.
diff --git a/llvm/lib/Target/X86/X86ScheduleBtVer2.td b/llvm/lib/Target/X86/X86ScheduleBtVer2.td
index 9cba933..d937825 100644
--- a/llvm/lib/Target/X86/X86ScheduleBtVer2.td
+++ b/llvm/lib/Target/X86/X86ScheduleBtVer2.td
@@ -807,7 +807,7 @@ def : InstRW<[JWriteINSERTQ], (instrs INSERTQ, INSERTQI)>;
////////////////////////////////////////////////////////////////////////////////

def JWriteVecExtractF128: SchedWriteRes<[JFPU01, JFPX]>;
-def : InstRW<[JWriteVecExtractF128], (instrs VEXTRACTF128rr)>;
+def : InstRW<[JWriteVecExtractF128], (instrs VEXTRACTF128rri)>;

def JWriteVBROADCASTYLd: SchedWriteRes<[JLAGU, JFPU01, JFPX]> {
  let Latency = 6;
@@ -930,7 +930,7 @@ def JWriteVPERM2F128 : SchedWriteVariant<[
  SchedVar<MCSchedPredicate<ZeroIdiomVPERMPredicate>, [JWriteZeroIdiomYmm]>,
  SchedVar<NoSchedPred, [WriteFShuffle256]>
]>;
-def : InstRW<[JWriteVPERM2F128], (instrs VPERM2F128rr)>;
+def : InstRW<[JWriteVPERM2F128], (instrs VPERM2F128rri)>;

// This write is used for slow LEA instructions.
def JWrite3OpsLEA : SchedWriteRes<[JALU1, JSAGU]> {
@@ -1008,7 +1008,7 @@ def : IsZeroIdiomFunction<[
    VXORPSYrr, VXORPDYrr,
    VANDNPSYrr, VANDNPDYrr
  ], ZeroIdiomPredicate>,
-  DepBreakingClass<[ VPERM2F128rr ], ZeroIdiomVPERMPredicate>
+  DepBreakingClass<[ VPERM2F128rri ], ZeroIdiomVPERMPredicate>
]>;

def : IsDepBreakingFunction<[
diff --git a/llvm/lib/Target/X86/X86ScheduleZnver1.td b/llvm/lib/Target/X86/X86ScheduleZnver1.td
index a044ddc..b1a7b2f 100644
--- a/llvm/lib/Target/X86/X86ScheduleZnver1.td
+++ b/llvm/lib/Target/X86/X86ScheduleZnver1.td
@@ -991,16 +991,16 @@ def ZnWriteVPERM2r : SchedWriteRes<[ZnFPU0, ZnFPU12]> {
  let Latency = 3;
  let ReleaseAtCycles = [3,3];
}
-def : InstRW<[ZnWriteVPERM2r], (instrs VPERM2F128rr,
-                                       VPERM2I128rr)>;
+def : InstRW<[ZnWriteVPERM2r], (instrs VPERM2F128rri,
+                                       VPERM2I128rri)>;

def ZnWriteVPERM2m : SchedWriteRes<[ZnAGU, ZnFPU0, ZnFPU12]> {
  let NumMicroOps = 12;
  let Latency = 8;
  let ReleaseAtCycles = [1,3,3];
}
-def : InstRW<[ZnWriteVPERM2m], (instrs VPERM2F128rm,
-                                       VPERM2I128rm)>;
+def : InstRW<[ZnWriteVPERM2m], (instrs VPERM2F128rmi,
+                                       VPERM2I128rmi)>;

def ZnWriteBROADCAST : SchedWriteRes<[ZnAGU, ZnFPU13]> {
  let NumMicroOps = 2;
@@ -1017,7 +1017,7 @@ def ZnWriteEXTRACTPSr : SchedWriteRes<[ZnFPU12, ZnFPU2]> {
  let NumMicroOps = 2;
  let ReleaseAtCycles = [1, 2];
}
-def : InstRW<[ZnWriteEXTRACTPSr], (instregex "(V?)EXTRACTPSrr")>;
+def : InstRW<[ZnWriteEXTRACTPSr], (instregex "(V?)EXTRACTPSrri")>;

def ZnWriteEXTRACTPSm : SchedWriteRes<[ZnAGU,ZnFPU12, ZnFPU2]> {
  let Latency = 5;
@@ -1025,16 +1025,16 @@ def ZnWriteEXTRACTPSm : SchedWriteRes<[ZnAGU,ZnFPU12, ZnFPU2]> {
  let ReleaseAtCycles = [5, 1, 2];
}
// m32,x,i.
-def : InstRW<[ZnWriteEXTRACTPSm], (instregex "(V?)EXTRACTPSmr")>;
+def : InstRW<[ZnWriteEXTRACTPSm], (instregex "(V?)EXTRACTPSmri")>;

// VEXTRACTF128 / VEXTRACTI128.
// x,y,i.
-def : InstRW<[ZnWriteFPU013], (instrs VEXTRACTF128rr,
-                                      VEXTRACTI128rr)>;
+def : InstRW<[ZnWriteFPU013], (instrs VEXTRACTF128rri,
+                                      VEXTRACTI128rri)>;

// m128,y,i.
-def : InstRW<[ZnWriteFPU013m], (instrs VEXTRACTF128mr,
-                                       VEXTRACTI128mr)>;
+def : InstRW<[ZnWriteFPU013m], (instrs VEXTRACTF128mri,
+                                       VEXTRACTI128mri)>;

def ZnWriteVINSERT128r: SchedWriteRes<[ZnFPU013]> {
  let Latency = 2;
@@ -1047,10 +1047,10 @@ def ZnWriteVINSERT128Ld: SchedWriteRes<[ZnAGU,ZnFPU013]> {
}
// VINSERTF128 / VINSERTI128.
// y,y,x,i.
-def : InstRW<[ZnWriteVINSERT128r], (instrs VINSERTF128rr,
-                                           VINSERTI128rr)>;
-def : InstRW<[ZnWriteVINSERT128Ld], (instrs VINSERTF128rm,
-                                            VINSERTI128rm)>;
+def : InstRW<[ZnWriteVINSERT128r], (instrs VINSERTF128rri,
+                                           VINSERTI128rri)>;
+def : InstRW<[ZnWriteVINSERT128Ld], (instrs VINSERTF128rmi,
+                                            VINSERTI128rmi)>;

// VGATHER.
def : InstRW<[WriteMicrocoded], (instregex "VGATHER(Q|D)(PD|PS)(Y?)rm")>;
diff --git a/llvm/lib/Target/X86/X86ScheduleZnver2.td b/llvm/lib/Target/X86/X86ScheduleZnver2.td
index c3a0f26..8ac095b 100644
--- a/llvm/lib/Target/X86/X86ScheduleZnver2.td
+++ b/llvm/lib/Target/X86/X86ScheduleZnver2.td
@@ -998,15 +998,15 @@ def Zn2WriteVPERM2r : SchedWriteRes<[Zn2FPU2]> {
  let NumMicroOps = 1;
  let Latency = 3;
}
-def : InstRW<[Zn2WriteVPERM2r], (instrs VPERM2F128rr,
-                                        VPERM2I128rr)>;
+def : InstRW<[Zn2WriteVPERM2r], (instrs VPERM2F128rri,
+                                        VPERM2I128rri)>;

def Zn2WriteVPERM2m : SchedWriteRes<[Zn2AGU, Zn2FPU2]> {
  let NumMicroOps = 1;
  let Latency = 8;
}
-def : InstRW<[Zn2WriteVPERM2m], (instrs VPERM2F128rm,
-                                        VPERM2I128rm)>;
+def : InstRW<[Zn2WriteVPERM2m], (instrs VPERM2F128rmi,
+                                        VPERM2I128rmi)>;

def Zn2WriteBROADCAST : SchedWriteRes<[Zn2AGU, Zn2FPU13]> {
  let NumMicroOps = 2;
@@ -1023,7 +1023,7 @@ def Zn2WriteEXTRACTPSr : SchedWriteRes<[Zn2FPU12, Zn2FPU2]> {
  let NumMicroOps = 2;
  let ReleaseAtCycles = [1, 2];
}
-def : InstRW<[Zn2WriteEXTRACTPSr], (instregex "(V?)EXTRACTPSrr")>;
+def : InstRW<[Zn2WriteEXTRACTPSr], (instregex "(V?)EXTRACTPSrri")>;

def Zn2WriteEXTRACTPSm : SchedWriteRes<[Zn2AGU,Zn2FPU12, Zn2FPU2]> {
  let Latency = 5;
@@ -1031,16 +1031,16 @@ def Zn2WriteEXTRACTPSm : SchedWriteRes<[Zn2AGU,Zn2FPU12, Zn2FPU2]> {
  let ReleaseAtCycles = [5, 1, 2];
}
// m32,x,i.
-def : InstRW<[Zn2WriteEXTRACTPSm], (instregex "(V?)EXTRACTPSmr")>;
+def : InstRW<[Zn2WriteEXTRACTPSm], (instregex "(V?)EXTRACTPSmri")>;

// VEXTRACTF128 / VEXTRACTI128.
// x,y,i.
-def : InstRW<[Zn2WriteFPU013], (instrs VEXTRACTF128rr,
-                                       VEXTRACTI128rr)>;
+def : InstRW<[Zn2WriteFPU013], (instrs VEXTRACTF128rri,
+                                       VEXTRACTI128rri)>;

// m128,y,i.
-def : InstRW<[Zn2WriteFPU013m], (instrs VEXTRACTF128mr,
-                                        VEXTRACTI128mr)>;
+def : InstRW<[Zn2WriteFPU013m], (instrs VEXTRACTF128mri,
+                                        VEXTRACTI128mri)>;

def Zn2WriteVINSERT128r: SchedWriteRes<[Zn2FPU013]> {
  let Latency = 2;
@@ -1052,10 +1052,10 @@ def Zn2WriteVINSERT128Ld: SchedWriteRes<[Zn2AGU,Zn2FPU013]> {
}
// VINSERTF128 / VINSERTI128.
// y,y,x,i.
-def : InstRW<[Zn2WriteVINSERT128r], (instrs VINSERTF128rr,
-                                            VINSERTI128rr)>;
-def : InstRW<[Zn2WriteVINSERT128Ld], (instrs VINSERTF128rm,
-                                             VINSERTI128rm)>;
+def : InstRW<[Zn2WriteVINSERT128r], (instrs VINSERTF128rri,
+                                            VINSERTI128rri)>;
+def : InstRW<[Zn2WriteVINSERT128Ld], (instrs VINSERTF128rmi,
+                                             VINSERTI128rmi)>;

// VGATHER.
def : InstRW<[WriteMicrocoded], (instregex "VGATHER(Q|D)(PD|PS)(Y?)rm")>; diff --git a/llvm/lib/Target/X86/X86ScheduleZnver3.td b/llvm/lib/Target/X86/X86ScheduleZnver3.td index cbf1de8..9e271c1 100644 --- a/llvm/lib/Target/X86/X86ScheduleZnver3.td +++ b/llvm/lib/Target/X86/X86ScheduleZnver3.td @@ -989,21 +989,21 @@ def Zn3WriteVEXTRACTF128rr_VEXTRACTI128rr : SchedWriteRes<[Zn3FPFMisc0]> { let ReleaseAtCycles = [1]; let NumMicroOps = 1; } -def : InstRW<[Zn3WriteVEXTRACTF128rr_VEXTRACTI128rr], (instrs VEXTRACTF128rr, VEXTRACTI128rr)>; +def : InstRW<[Zn3WriteVEXTRACTF128rr_VEXTRACTI128rr], (instrs VEXTRACTF128rri, VEXTRACTI128rri)>; def Zn3WriteVEXTRACTI128mr : SchedWriteRes<[Zn3FPFMisc0, Zn3FPSt, Zn3Store]> { let Latency = !add(Znver3Model.LoadLatency, Zn3WriteVEXTRACTF128rr_VEXTRACTI128rr.Latency); let ReleaseAtCycles = [1, 1, 1]; let NumMicroOps = !add(Zn3WriteVEXTRACTF128rr_VEXTRACTI128rr.NumMicroOps, 1); } -def : InstRW<[Zn3WriteVEXTRACTI128mr], (instrs VEXTRACTI128mr, VEXTRACTF128mr)>; +def : InstRW<[Zn3WriteVEXTRACTI128mr], (instrs VEXTRACTI128mri, VEXTRACTF128mri)>; def Zn3WriteVINSERTF128rmr : SchedWriteRes<[Zn3AGU012, Zn3Load, Zn3FPFMisc0]> { let Latency = !add(Znver3Model.LoadLatency, Zn3WriteVEXTRACTF128rr_VEXTRACTI128rr.Latency); let ReleaseAtCycles = [1, 1, 1]; let NumMicroOps = !add(Zn3WriteVEXTRACTF128rr_VEXTRACTI128rr.NumMicroOps, 0); } -def : InstRW<[Zn3WriteVINSERTF128rmr], (instrs VINSERTF128rm)>; +def : InstRW<[Zn3WriteVINSERTF128rmr], (instrs VINSERTF128rmi)>; defm : Zn3WriteResYMM<WriteVecStoreY, [Zn3FPSt, Zn3Store], Znver3Model.StoreLatency, [1, 1], 1>; defm : Zn3WriteResXMM<WriteVecStoreNT, [Zn3FPSt, Zn3Store], Znver3Model.StoreLatency, [1, 1], 1>; @@ -1335,14 +1335,14 @@ def Zn3WriteVPERM2I128rr_VPERM2F128rr : SchedWriteRes<[Zn3FPVShuf]> { let ReleaseAtCycles = [1]; let NumMicroOps = 1; } -def : InstRW<[Zn3WriteVPERM2I128rr_VPERM2F128rr], (instrs VPERM2I128rr, VPERM2F128rr)>; +def : InstRW<[Zn3WriteVPERM2I128rr_VPERM2F128rr], (instrs VPERM2I128rri, VPERM2F128rri)>; def Zn3WriteVPERM2F128rm : SchedWriteRes<[Zn3AGU012, Zn3Load, Zn3FPVShuf]> { let Latency = !add(Znver3Model.LoadLatency, Zn3WriteVPERM2I128rr_VPERM2F128rr.Latency); let ReleaseAtCycles = [1, 1, 1]; let NumMicroOps = !add(Zn3WriteVPERM2I128rr_VPERM2F128rr.NumMicroOps, 0); } -def : InstRW<[Zn3WriteVPERM2F128rm], (instrs VPERM2F128rm)>; +def : InstRW<[Zn3WriteVPERM2F128rm], (instrs VPERM2F128rmi)>; def Zn3WriteVPERMPSYrm : SchedWriteRes<[Zn3AGU012, Zn3Load, Zn3FPVShuf]> { let Latency = !add(Znver3Model.LoadLatency, 7); diff --git a/llvm/lib/Target/X86/X86ScheduleZnver4.td b/llvm/lib/Target/X86/X86ScheduleZnver4.td index 6181ee8..f82f9a8 100644 --- a/llvm/lib/Target/X86/X86ScheduleZnver4.td +++ b/llvm/lib/Target/X86/X86ScheduleZnver4.td @@ -1001,21 +1001,21 @@ def Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr : SchedWriteRes<[Zn4FPFMisc0]> { let ReleaseAtCycles = [1]; let NumMicroOps = 1; } -def : InstRW<[Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr], (instrs VEXTRACTF128rr, VEXTRACTI128rr)>; +def : InstRW<[Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr], (instrs VEXTRACTF128rri, VEXTRACTI128rri)>; def Zn4WriteVEXTRACTI128mr : SchedWriteRes<[Zn4FPFMisc0, Zn4FPSt, Zn4Store]> { let Latency = !add(Znver4Model.LoadLatency, Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr.Latency); let ReleaseAtCycles = [1, 1, 1]; let NumMicroOps = !add(Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr.NumMicroOps, 1); } -def : InstRW<[Zn4WriteVEXTRACTI128mr], (instrs VEXTRACTI128mr, VEXTRACTF128mr)>; +def : InstRW<[Zn4WriteVEXTRACTI128mr], (instrs VEXTRACTI128mri, 
VEXTRACTF128mri)>; def Zn4WriteVINSERTF128rmr : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPFMisc0]> { let Latency = !add(Znver4Model.LoadLatency, Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr.Latency); let ReleaseAtCycles = [1, 1, 1]; let NumMicroOps = !add(Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr.NumMicroOps, 0); } -def : InstRW<[Zn4WriteVINSERTF128rmr], (instrs VINSERTF128rm)>; +def : InstRW<[Zn4WriteVINSERTF128rmr], (instrs VINSERTF128rmi)>; defm : Zn4WriteResYMM<WriteVecStoreY, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>; defm : Zn4WriteResXMM<WriteVecStoreNT, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>; @@ -1375,14 +1375,14 @@ def Zn4WriteVPERM2I128rr_VPERM2F128rr : SchedWriteRes<[Zn4FPVShuf]> { let ReleaseAtCycles = [1]; let NumMicroOps = 1; } -def : InstRW<[Zn4WriteVPERM2I128rr_VPERM2F128rr], (instrs VPERM2I128rr, VPERM2F128rr)>; +def : InstRW<[Zn4WriteVPERM2I128rr_VPERM2F128rr], (instrs VPERM2I128rri, VPERM2F128rri)>; def Zn4WriteVPERM2F128rm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPVShuf]> { let Latency = !add(Znver4Model.LoadLatency, Zn4WriteVPERM2I128rr_VPERM2F128rr.Latency); let ReleaseAtCycles = [1, 1, 1]; let NumMicroOps = !add(Zn4WriteVPERM2I128rr_VPERM2F128rr.NumMicroOps, 0); } -def : InstRW<[Zn4WriteVPERM2F128rm], (instrs VPERM2F128rm)>; +def : InstRW<[Zn4WriteVPERM2F128rm], (instrs VPERM2F128rmi)>; def Zn4WriteVPERMPSYrr : SchedWriteRes<[Zn4FPVShuf]> { let Latency = 7; diff --git a/llvm/lib/Target/X86/X86TileConfig.cpp b/llvm/lib/Target/X86/X86TileConfig.cpp index 4552820..30295e9 100644 --- a/llvm/lib/Target/X86/X86TileConfig.cpp +++ b/llvm/lib/Target/X86/X86TileConfig.cpp @@ -128,9 +128,10 @@ bool X86TileConfig::runOnMachineFunction(MachineFunction &MF) { continue; if (MRI.getRegClass(VirtReg)->getID() != X86::TILERegClassID) continue; - if (VRM.getPhys(VirtReg) == VirtRegMap::NO_PHYS_REG) + MCRegister PhysReg = VRM.getPhys(VirtReg); + if (!PhysReg) continue; - unsigned Index = VRM.getPhys(VirtReg) - X86::TMM0; + unsigned Index = PhysReg - X86::TMM0; if (!Phys2Virt[Index]) Phys2Virt[Index] = VirtReg; } diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp index cd94661..28da864 100644 --- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp +++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp @@ -2364,8 +2364,8 @@ struct AAICVTrackerFunction : public AAICVTracker { /// TODO: Figure out a way to avoid adding entry in /// ICVReplacementValuesMap Instruction *Entry = &F->getEntryBlock().front(); - if (HasChanged == ChangeStatus::CHANGED && !ValuesMap.count(Entry)) - ValuesMap.insert(std::make_pair(Entry, nullptr)); + if (HasChanged == ChangeStatus::CHANGED) + ValuesMap.try_emplace(Entry); } return HasChanged; diff --git a/llvm/lib/Transforms/IPO/SCCP.cpp b/llvm/lib/Transforms/IPO/SCCP.cpp index ef95ec0..e80c6f7 100644 --- a/llvm/lib/Transforms/IPO/SCCP.cpp +++ b/llvm/lib/Transforms/IPO/SCCP.cpp @@ -278,6 +278,7 @@ static bool runIPSCCP( SmallVector<ReturnInst*, 8> ReturnsToZap; Solver.inferReturnAttributes(); + Solver.inferArgAttributes(); for (const auto &[F, ReturnValue] : Solver.getTrackedRetVals()) { assert(!F->getReturnType()->isVoidTy() && "should not track void functions"); diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp index 6af284d..5d1ec72 100644 --- a/llvm/lib/Transforms/IPO/SampleProfile.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp @@ -73,9 +73,9 @@ #include "llvm/Transforms/IPO/SampleContextTracker.h" #include "llvm/Transforms/IPO/SampleProfileMatcher.h" #include 
"llvm/Transforms/IPO/SampleProfileProbe.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Utils/CallPromotionUtils.h" #include "llvm/Transforms/Utils/Cloning.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include "llvm/Transforms/Utils/MisExpect.h" #include "llvm/Transforms/Utils/SampleProfileLoaderBaseImpl.h" #include "llvm/Transforms/Utils/SampleProfileLoaderBaseUtil.h" diff --git a/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp b/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp index b489d4f..d84856f 100644 --- a/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp @@ -29,7 +29,7 @@ #include "llvm/Support/CRC.h" #include "llvm/Support/CommandLine.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Transforms/Instrumentation.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include "llvm/Transforms/Utils/ModuleUtils.h" #include <unordered_set> #include <vector> diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp index 8e8d472..5cdfead 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -8178,6 +8178,75 @@ static Instruction *foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI, return nullptr; } +static Instruction *foldFCmpWithFloorAndCeil(FCmpInst &I, + InstCombinerImpl &IC) { + Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); + Type *OpType = LHS->getType(); + CmpInst::Predicate Pred = I.getPredicate(); + + bool FloorX = match(LHS, m_Intrinsic<Intrinsic::floor>(m_Specific(RHS))); + bool CeilX = match(LHS, m_Intrinsic<Intrinsic::ceil>(m_Specific(RHS))); + + if (!FloorX && !CeilX) { + if ((FloorX = match(RHS, m_Intrinsic<Intrinsic::floor>(m_Specific(LHS)))) || + (CeilX = match(RHS, m_Intrinsic<Intrinsic::ceil>(m_Specific(LHS))))) { + std::swap(LHS, RHS); + Pred = I.getSwappedPredicate(); + } + } + + switch (Pred) { + case FCmpInst::FCMP_OLE: + // fcmp ole floor(x), x => fcmp ord x, 0 + if (FloorX) + return new FCmpInst(FCmpInst::FCMP_ORD, RHS, ConstantFP::getZero(OpType), + "", &I); + break; + case FCmpInst::FCMP_OGT: + // fcmp ogt floor(x), x => false + if (FloorX) + return IC.replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); + break; + case FCmpInst::FCMP_OGE: + // fcmp oge ceil(x), x => fcmp ord x, 0 + if (CeilX) + return new FCmpInst(FCmpInst::FCMP_ORD, RHS, ConstantFP::getZero(OpType), + "", &I); + break; + case FCmpInst::FCMP_OLT: + // fcmp olt ceil(x), x => false + if (CeilX) + return IC.replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); + break; + case FCmpInst::FCMP_ULE: + // fcmp ule floor(x), x => true + if (FloorX) + return IC.replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); + break; + case FCmpInst::FCMP_UGT: + // fcmp ugt floor(x), x => fcmp uno x, 0 + if (FloorX) + return new FCmpInst(FCmpInst::FCMP_UNO, RHS, ConstantFP::getZero(OpType), + "", &I); + break; + case FCmpInst::FCMP_UGE: + // fcmp uge ceil(x), x => true + if (CeilX) + return IC.replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); + break; + case FCmpInst::FCMP_ULT: + // fcmp ult ceil(x), x => fcmp uno x, 0 + if (CeilX) + return new FCmpInst(FCmpInst::FCMP_UNO, RHS, ConstantFP::getZero(OpType), + "", &I); + break; + default: + break; + } + + return nullptr; +} + Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) { bool Changed = false; @@ -8382,6 +8451,9 @@ Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) { if 
(Instruction *R = foldSqrtWithFcmpZero(I, *this)) return R; + if (Instruction *R = foldFCmpWithFloorAndCeil(I, *this)) + return R; + if (match(Op0, m_FNeg(m_Value(X)))) { // fcmp pred (fneg X), C --> fcmp swap(pred) X, -C Constant *C; diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp index 5de0a78..16e43be 100644 --- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -71,11 +71,11 @@ #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include "llvm/TargetParser/Triple.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h" #include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h" #include "llvm/Transforms/Utils/ASanStackFrameLayout.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/ModuleUtils.h" #include "llvm/Transforms/Utils/PromoteMemToReg.h" diff --git a/llvm/lib/Transforms/Instrumentation/CGProfile.cpp b/llvm/lib/Transforms/Instrumentation/CGProfile.cpp index ebd7dae..60e7acc 100644 --- a/llvm/lib/Transforms/Instrumentation/CGProfile.cpp +++ b/llvm/lib/Transforms/Instrumentation/CGProfile.cpp @@ -17,7 +17,7 @@ #include "llvm/IR/Module.h" #include "llvm/IR/PassManager.h" #include "llvm/ProfileData/InstrProf.h" -#include "llvm/Transforms/Instrumentation.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include <optional> using namespace llvm; diff --git a/llvm/lib/Transforms/Instrumentation/CMakeLists.txt b/llvm/lib/Transforms/Instrumentation/CMakeLists.txt index d45b074..3e3c3ec 100644 --- a/llvm/lib/Transforms/Instrumentation/CMakeLists.txt +++ b/llvm/lib/Transforms/Instrumentation/CMakeLists.txt @@ -10,7 +10,6 @@ add_llvm_component_library(LLVMInstrumentation MemorySanitizer.cpp NumericalStabilitySanitizer.cpp IndirectCallPromotion.cpp - Instrumentation.cpp InstrOrderFile.cpp InstrProfiling.cpp KCFI.cpp diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp index b4b5f67..20fdf28 100644 --- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp @@ -104,8 +104,8 @@ #include "llvm/Support/SpecialCaseList.h" #include "llvm/Support/VirtualFileSystem.h" #include "llvm/TargetParser/Triple.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include "llvm/Transforms/Utils/Local.h" #include <algorithm> #include <cassert> diff --git a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp index c7f6f2a..1d4f85a 100644 --- a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp +++ b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp @@ -36,9 +36,9 @@ #include "llvm/Support/Path.h" #include "llvm/Support/Regex.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Instrumentation/CFGMST.h" #include "llvm/Transforms/Instrumentation/GCOVProfiler.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include "llvm/Transforms/Utils/ModuleUtils.h" #include <algorithm> #include <memory> diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp 
b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp index a7e7f9a..da7dc18 100644 --- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp @@ -57,9 +57,9 @@ #include "llvm/Support/RandomNumberGenerator.h" #include "llvm/Support/raw_ostream.h" #include "llvm/TargetParser/Triple.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/MemoryTaggingSupport.h" #include "llvm/Transforms/Utils/ModuleUtils.h" diff --git a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp index fbed593..76d6033 100644 --- a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp +++ b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp @@ -37,9 +37,9 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/Error.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Instrumentation/PGOInstrumentation.h" #include "llvm/Transforms/Utils/CallPromotionUtils.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include <cassert> #include <cstdint> #include <memory> diff --git a/llvm/lib/Transforms/Instrumentation/InstrOrderFile.cpp b/llvm/lib/Transforms/Instrumentation/InstrOrderFile.cpp index 6882dd8..8115662 100644 --- a/llvm/lib/Transforms/Instrumentation/InstrOrderFile.cpp +++ b/llvm/lib/Transforms/Instrumentation/InstrOrderFile.cpp @@ -19,7 +19,7 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Transforms/Instrumentation.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include <fstream> #include <mutex> #include <sstream> diff --git a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp index 25bed6d..014e049 100644 --- a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp +++ b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp @@ -50,9 +50,9 @@ #include "llvm/Support/Error.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/TargetParser/Triple.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Instrumentation/PGOInstrumentation.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include "llvm/Transforms/Utils/ModuleUtils.h" #include "llvm/Transforms/Utils/SSAUpdater.h" #include <algorithm> diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp index 17c5638..07d6674 100644 --- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -198,8 +198,8 @@ #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include "llvm/TargetParser/Triple.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/ModuleUtils.h" #include <algorithm> diff --git a/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp index 
ffd9faf..3cefc1a 100644 --- a/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp @@ -38,9 +38,9 @@ #include "llvm/Support/MathExtras.h" #include "llvm/Support/Regex.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/EscapeEnumerator.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/ModuleUtils.h" diff --git a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp index e5a522d..b985d5b 100644 --- a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp +++ b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp @@ -107,10 +107,10 @@ #include "llvm/Support/VirtualFileSystem.h" #include "llvm/Support/raw_ostream.h" #include "llvm/TargetParser/Triple.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Instrumentation/BlockCoverageInference.h" #include "llvm/Transforms/Instrumentation/CFGMST.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include "llvm/Transforms/Utils/MisExpect.h" #include "llvm/Transforms/Utils/ModuleUtils.h" #include <algorithm> diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp index 6738521..68cf4e5 100644 --- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp @@ -42,8 +42,8 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Utils/EscapeEnumerator.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/ModuleUtils.h" diff --git a/llvm/lib/Transforms/Utils/CMakeLists.txt b/llvm/lib/Transforms/Utils/CMakeLists.txt index b5a7eed..36761cf 100644 --- a/llvm/lib/Transforms/Utils/CMakeLists.txt +++ b/llvm/lib/Transforms/Utils/CMakeLists.txt @@ -35,6 +35,7 @@ add_llvm_component_library(LLVMTransformUtils InlineFunction.cpp InjectTLIMappings.cpp InstructionNamer.cpp + Instrumentation.cpp IntegerDivision.cpp LCSSA.cpp LibCallsShrinkWrap.cpp diff --git a/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp b/llvm/lib/Transforms/Utils/Instrumentation.cpp index 9c436da..92e07ae 100644 --- a/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp +++ b/llvm/lib/Transforms/Utils/Instrumentation.cpp @@ -11,7 +11,7 @@ // //===----------------------------------------------------------------------===// -#include "llvm/Transforms/Instrumentation.h" +#include "llvm/Transforms/Utils/Instrumentation.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/DiagnosticPrinter.h" #include "llvm/IR/IntrinsicInst.h" @@ -41,7 +41,8 @@ bool llvm::checkIfAlreadyInstrumented(Module &M, StringRef Flag) { } /// Moves I before IP. Returns new insert point. -static BasicBlock::iterator moveBeforeInsertPoint(BasicBlock::iterator I, BasicBlock::iterator IP) { +static BasicBlock::iterator moveBeforeInsertPoint(BasicBlock::iterator I, + BasicBlock::iterator IP) { // If I is IP, move the insert point down. 
if (I == IP) { ++IP; @@ -93,7 +94,8 @@ GlobalVariable *llvm::createPrivateGlobalForString(Module &M, StringRef Str, } Comdat *llvm::getOrCreateFunctionComdat(Function &F, Triple &T) { - if (auto Comdat = F.getComdat()) return Comdat; + if (auto Comdat = F.getComdat()) + return Comdat; assert(F.hasName()); Module *M = F.getParent(); diff --git a/llvm/lib/Transforms/Utils/SCCPSolver.cpp b/llvm/lib/Transforms/Utils/SCCPSolver.cpp index ce58e8e..101d605 100644 --- a/llvm/lib/Transforms/Utils/SCCPSolver.cpp +++ b/llvm/lib/Transforms/Utils/SCCPSolver.cpp @@ -341,31 +341,45 @@ bool SCCPSolver::removeNonFeasibleEdges(BasicBlock *BB, DomTreeUpdater &DTU, return true; } +static void inferAttribute(Function *F, unsigned AttrIndex, + const ValueLatticeElement &Val) { + // If there is a known constant range for the value, add range attribute. + if (Val.isConstantRange() && !Val.getConstantRange().isSingleElement()) { + // Do not add range attribute if the value may include undef. + if (Val.isConstantRangeIncludingUndef()) + return; + + // Take the intersection of the existing attribute and the inferred range. + Attribute OldAttr = F->getAttributeAtIndex(AttrIndex, Attribute::Range); + ConstantRange CR = Val.getConstantRange(); + if (OldAttr.isValid()) + CR = CR.intersectWith(OldAttr.getRange()); + F->addAttributeAtIndex( + AttrIndex, Attribute::get(F->getContext(), Attribute::Range, CR)); + return; + } + // Infer nonnull attribute. + if (Val.isNotConstant() && Val.getNotConstant()->getType()->isPointerTy() && + Val.getNotConstant()->isNullValue() && + !F->hasAttributeAtIndex(AttrIndex, Attribute::NonNull)) { + F->addAttributeAtIndex(AttrIndex, + Attribute::get(F->getContext(), Attribute::NonNull)); + } +} + void SCCPSolver::inferReturnAttributes() const { - for (const auto &[F, ReturnValue] : getTrackedRetVals()) { - - // If there is a known constant range for the return value, add range - // attribute to the return value. - if (ReturnValue.isConstantRange() && - !ReturnValue.getConstantRange().isSingleElement()) { - // Do not add range metadata if the return value may include undef. - if (ReturnValue.isConstantRangeIncludingUndef()) - continue; + for (const auto &[F, ReturnValue] : getTrackedRetVals()) + inferAttribute(F, AttributeList::ReturnIndex, ReturnValue); +} - // Take the intersection of the existing attribute and the inferred range. - ConstantRange CR = ReturnValue.getConstantRange(); - if (F->hasRetAttribute(Attribute::Range)) - CR = CR.intersectWith(F->getRetAttribute(Attribute::Range).getRange()); - F->addRangeRetAttr(CR); - continue; - } - // Infer nonnull return attribute. 
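An aside on the inferAttribute helper factored out above: it merges a newly inferred range with any range attribute already present, keeping the combination of both facts. A minimal stand-alone sketch of that intersection step using llvm::ConstantRange directly (the concrete bounds here are invented for illustration):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
using namespace llvm;

ConstantRange narrowInferredRange() {
  // Existing attribute: value known to be in [0, 100).
  ConstantRange Existing(APInt(32, 0), APInt(32, 100));
  // Solver result: value known to be in [42, 1000).
  ConstantRange Inferred(APInt(32, 42), APInt(32, 1000));
  // inferAttribute keeps the intersection of both facts: [42, 100).
  return Inferred.intersectWith(Existing);
}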
- if (F->getReturnType()->isPointerTy() && ReturnValue.isNotConstant() && - ReturnValue.getNotConstant()->isNullValue() && - !F->hasRetAttribute(Attribute::NonNull)) { - F->addRetAttr(Attribute::NonNull); +void SCCPSolver::inferArgAttributes() const { + for (Function *F : getArgumentTrackedFunctions()) { + if (!isBlockExecutable(&F->front())) continue; - } + for (Argument &A : F->args()) + if (!A.getType()->isStructTy()) + inferAttribute(F, AttributeList::FirstArgIndex + A.getArgNo(), + getLatticeValueFor(&A)); } } @@ -766,6 +780,10 @@ public: return TrackingIncomingArguments.count(F); } + const SmallPtrSetImpl<Function *> &getArgumentTrackedFunctions() const { + return TrackingIncomingArguments; + } + void solve(); bool resolvedUndef(Instruction &I); @@ -2140,6 +2158,11 @@ bool SCCPSolver::isArgumentTrackedFunction(Function *F) { return Visitor->isArgumentTrackedFunction(F); } +const SmallPtrSetImpl<Function *> & +SCCPSolver::getArgumentTrackedFunctions() const { + return Visitor->getArgumentTrackedFunctions(); +} + void SCCPSolver::solve() { Visitor->solve(); } bool SCCPSolver::resolvedUndefsIn(Function &F) { diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp index f9db996..5a694b5 100644 --- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp @@ -3040,7 +3040,7 @@ static bool isSafeCheapLoadStore(const Instruction *I, /// %sub = sub %x, %y /// br label BB2 /// EndBB: -/// %phi = phi [ %sub, %ThenBB ], [ 0, %EndBB ] +/// %phi = phi [ %sub, %ThenBB ], [ 0, %BB ] /// ... /// \endcode /// @@ -3338,9 +3338,20 @@ bool SimplifyCFGOpt::speculativelyExecuteBB(BranchInst *BI, if (auto *LI = dyn_cast<LoadInst>(I)) { // Handle Load. auto *Ty = I->getType(); - MaskedLoadStore = Builder.CreateMaskedLoad(FixedVectorType::get(Ty, 1), - Op0, LI->getAlign(), Mask); - I->replaceAllUsesWith(Builder.CreateBitCast(MaskedLoadStore, Ty)); + PHINode *PN = nullptr; + Value *PassThru = nullptr; + for (User *U : I->users()) + if ((PN = dyn_cast<PHINode>(U))) { + PassThru = Builder.CreateBitCast(PN->getIncomingValueForBlock(BB), + FixedVectorType::get(Ty, 1)); + break; + } + MaskedLoadStore = Builder.CreateMaskedLoad( + FixedVectorType::get(Ty, 1), Op0, LI->getAlign(), Mask, PassThru); + Value *NewLoadStore = Builder.CreateBitCast(MaskedLoadStore, Ty); + if (PN) + PN->setIncomingValue(PN->getBasicBlockIndex(BB), NewLoadStore); + I->replaceAllUsesWith(NewLoadStore); } else { // Handle Store. 
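One subtlety in the masked-load rewrite just above: lanes where the mask is false must still produce the value the phi would otherwise carry, which is why the phi's incoming value is bitcast into the pass-through operand of the masked load. A rough IRBuilder sketch of the shape being built, with invented names and a single-element vector as in the transform:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Sketch: speculate "v = cond ? *P : Dflt" as one masked load.
Value *speculateCondLoad(IRBuilder<> &B, Value *P, Value *Cond,
                         Value *Dflt, Type *Ty, Align A) {
  auto *VecTy = FixedVectorType::get(Ty, 1);
  auto *MaskTy = FixedVectorType::get(B.getInt1Ty(), 1);
  Value *Mask = B.CreateBitCast(Cond, MaskTy);
  // False lanes yield PassThru, i.e. the phi's old incoming value.
  Value *PassThru = B.CreateBitCast(Dflt, VecTy);
  Value *Load = B.CreateMaskedLoad(VecTy, P, A, Mask, PassThru);
  return B.CreateBitCast(Load, Ty);
}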
auto *StoredVal = diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 3a313a9..0fa7c2a 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -4344,12 +4344,11 @@ bool LoopVectorizationPlanner::isMoreProfitable( void LoopVectorizationPlanner::emitInvalidCostRemarks( OptimizationRemarkEmitter *ORE) { using RecipeVFPair = std::pair<VPRecipeBase *, ElementCount>; - LLVMContext &LLVMCtx = OrigLoop->getHeader()->getContext(); SmallVector<RecipeVFPair> InvalidCosts; for (const auto &Plan : VPlans) { for (ElementCount VF : Plan->vectorFactors()) { VPCostContext CostCtx(CM.TTI, *CM.TLI, Legal->getWidestInductionType(), - LLVMCtx, CM); + CM); auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry()); for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) { for (auto &R : *VPBB) { @@ -4452,8 +4451,7 @@ void LoopVectorizationPlanner::emitInvalidCostRemarks( static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI) { assert(VF.isVector() && "Checking a scalar VF?"); - VPTypeAnalysis TypeInfo(Plan.getCanonicalIV()->getScalarType(), - Plan.getCanonicalIV()->getScalarType()->getContext()); + VPTypeAnalysis TypeInfo(Plan.getCanonicalIV()->getScalarType()); DenseSet<VPRecipeBase *> EphemeralRecipes; collectEphemeralRecipesForVPlan(Plan, EphemeralRecipes); // Set of already visited types. @@ -7091,6 +7089,8 @@ void LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { InstructionCost VPCostContext::getLegacyCost(Instruction *UI, ElementCount VF) const { + if (ForceTargetInstructionCost.getNumOccurrences()) + return InstructionCost(ForceTargetInstructionCost.getNumOccurrences()); return CM.getInstructionCost(UI, VF); } @@ -7194,6 +7194,9 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF, // for now. // TODO: Switch to costing based on VPlan once the logic has been ported. for (const auto &[RedPhi, RdxDesc] : Legal->getReductionVars()) { + if (ForceTargetInstructionCost.getNumOccurrences()) + continue; + if (!CM.isInLoopReduction(RedPhi) && !RecurrenceDescriptor::isAnyOfRecurrenceKind( RdxDesc.getRecurrenceKind())) @@ -7262,9 +7265,7 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF, InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan, ElementCount VF) const { - LLVMContext &LLVMCtx = OrigLoop->getHeader()->getContext(); - VPCostContext CostCtx(CM.TTI, *CM.TLI, Legal->getWidestInductionType(), - LLVMCtx, CM); + VPCostContext CostCtx(CM.TTI, *CM.TLI, Legal->getWidestInductionType(), CM); InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx); // Now compute and add the VPlan-based cost. @@ -7383,9 +7384,7 @@ VectorizationFactor LoopVectorizationPlanner::computeBestVF() { // simplifications not accounted for in the legacy cost model. If that's the // case, don't trigger the assertion, as the extra simplifications may cause a // different VF to be picked by the VPlan-based cost model. 
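The LLVMContext plumbing removed throughout this file (see the deletions just above and below, plus the matching constructor changes later in VPlan.h and VPlanAnalysis.h) all rests on one fact: an llvm::Type already knows the context that owns it, so passing both a Type and an LLVMContext was redundant. A one-liner shows everything the cleanup relies on:

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
using namespace llvm;

LLVMContext &contextOf(Type *CanIVTy) {
  // Every Type is owned by exactly one LLVMContext, so the context
  // can always be recovered from the canonical IV type itself.
  return CanIVTy->getContext();
}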
- LLVMContext &LLVMCtx = OrigLoop->getHeader()->getContext(); - VPCostContext CostCtx(CM.TTI, *CM.TLI, Legal->getWidestInductionType(), - LLVMCtx, CM); + VPCostContext CostCtx(CM.TTI, *CM.TLI, Legal->getWidestInductionType(), CM); precomputeCosts(BestPlan, BestFactor.Width, CostCtx); assert((BestFactor.Width == LegacyVF.Width || planContainsAdditionalSimplifications(getPlanFor(BestFactor.Width), @@ -7522,8 +7521,7 @@ LoopVectorizationPlanner::executePlan( LLVM_DEBUG(BestVPlan.dump()); // Perform the actual loop transformation. - VPTransformState State(BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan, - OrigLoop->getHeader()->getContext()); + VPTransformState State(BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan); // 0. Generate SCEV-dependent code into the preheader, including TripCount, // before making any changes to the CFG. @@ -8630,11 +8628,12 @@ static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, bool HasNUW, {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL); } -// Collect (ExitPhi, ExitingValue) pairs phis in the original exit block that -// are modeled in VPlan. Some exiting values are not modeled explicitly yet and -// won't be included. Those are un-truncated VPWidenIntOrFpInductionRecipe, -// VPWidenPointerInductionRecipe and induction increments. -static MapVector<PHINode *, VPValue *> collectUsersInExitBlock( +// Collect VPIRInstructions for phis in the original exit block that are modeled +// in VPlan and add the exiting VPValue as operand. Some exiting values are not +// modeled explicitly yet and won't be included. Those are un-truncated +// VPWidenIntOrFpInductionRecipe, VPWidenPointerInductionRecipe and induction +// increments. +static SetVector<VPIRInstruction *> collectUsersInExitBlock( Loop *OrigLoop, VPRecipeBuilder &Builder, VPlan &Plan, const MapVector<PHINode *, InductionDescriptor> &Inductions) { auto *MiddleVPBB = @@ -8644,13 +8643,17 @@ static MapVector<PHINode *, VPValue *> collectUsersInExitBlock( // from scalar loop only. if (MiddleVPBB->getNumSuccessors() != 2) return {}; - MapVector<PHINode *, VPValue *> ExitingValuesToFix; - BasicBlock *ExitBB = - cast<VPIRBasicBlock>(MiddleVPBB->getSuccessors()[0])->getIRBasicBlock(); + SetVector<VPIRInstruction *> ExitUsersToFix; + VPBasicBlock *ExitVPBB = cast<VPIRBasicBlock>(MiddleVPBB->getSuccessors()[0]); BasicBlock *ExitingBB = OrigLoop->getExitingBlock(); - for (PHINode &ExitPhi : ExitBB->phis()) { - Value *IncomingValue = - ExitPhi.getIncomingValueForBlock(ExitingBB); + for (VPRecipeBase &R : *ExitVPBB) { + auto *ExitIRI = dyn_cast<VPIRInstruction>(&R); + if (!ExitIRI) + continue; + auto *ExitPhi = dyn_cast<PHINode>(&ExitIRI->getInstruction()); + if (!ExitPhi) + break; + Value *IncomingValue = ExitPhi->getIncomingValueForBlock(ExitingBB); VPValue *V = Builder.getVPValueOrAddLiveIn(IncomingValue); // Exit values for inductions are computed and updated outside of VPlan and // independent of induction recipes. @@ -8666,17 +8669,18 @@ static MapVector<PHINode *, VPValue *> collectUsersInExitBlock( return P && Inductions.contains(P); }))) continue; - ExitingValuesToFix.insert({&ExitPhi, V}); + ExitUsersToFix.insert(ExitIRI); + ExitIRI->addOperand(V); } - return ExitingValuesToFix; + return ExitUsersToFix; } -// Add exit values to \p Plan. Extracts and VPLiveOuts are added for each entry -// in \p ExitingValuesToFix. +// Add exit values to \p Plan. Extracts are added for each entry in \p +// ExitUsersToFix if needed and their operands are updated. 
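For context on the ExtractFromEnd operands used below: after vectorization, the scalar value an exit phi needs lives in one of the trailing lanes of the last vector value, so the extract is expressed as an offset from the end (1 = last element). With a fixed vector factor the emitted code is morally just an extractelement; an illustrative IRBuilder analogue (not the VPlan implementation itself):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Sketch: read the Offset-from-the-end lane of a fixed-width vector
// (Offset 1 = last element, 2 = penultimate element).
Value *extractFromEnd(IRBuilder<> &B, Value *Vec, unsigned Offset) {
  unsigned VF = cast<FixedVectorType>(Vec->getType())->getNumElements();
  return B.CreateExtractElement(Vec, B.getInt64(VF - Offset));
}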
static void addUsersInExitBlock(VPlan &Plan, - MapVector<PHINode *, VPValue *> &ExitingValuesToFix) { - if (ExitingValuesToFix.empty()) + const SetVector<VPIRInstruction *> &ExitUsersToFix) { + if (ExitUsersToFix.empty()) return; auto *MiddleVPBB = @@ -8685,18 +8689,19 @@ addUsersInExitBlock(VPlan &Plan, cast<VPIRBasicBlock>(MiddleVPBB->getSuccessors()[0])->getIRBasicBlock(); VPBuilder B(MiddleVPBB, MiddleVPBB->getFirstNonPhi()); - // Introduce VPUsers modeling the exit values. - for (const auto &[ExitPhi, V] : ExitingValuesToFix) { + // Introduce extract for exiting values and update the VPIRInstructions + // modeling the corresponding LCSSA phis. + for (VPIRInstruction *ExitIRI : ExitUsersToFix) { + VPValue *V = ExitIRI->getOperand(0); // Pass live-in values used by exit phis directly through to the live-out. - if (V->isLiveIn()) { - Plan.addLiveOut(ExitPhi, V); + if (V->isLiveIn()) continue; - } + VPValue *Ext = B.createNaryOp( VPInstruction::ExtractFromEnd, {V, Plan.getOrAddLiveIn(ConstantInt::get( IntegerType::get(ExitBB->getContext(), 32), 1))}); - Plan.addLiveOut(ExitPhi, Ext); + ExitIRI->setOperand(0, Ext); } } @@ -8709,7 +8714,7 @@ addUsersInExitBlock(VPlan &Plan, /// 2. Feed the penultimate value of recurrences to their LCSSA phi users in /// the original exit block using a VPLiveOut. static void addLiveOutsForFirstOrderRecurrences( - VPlan &Plan, MapVector<PHINode *, VPValue *> &ExitingValuesToFix) { + VPlan &Plan, SetVector<VPIRInstruction *> &ExitUsersToFix) { VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion(); // Start by finding out if middle block branches to scalar preheader, which is @@ -8719,21 +8724,15 @@ static void addLiveOutsForFirstOrderRecurrences( // Plan->getScalarLoopRegion()->getSinglePredecessor() in the future once the // scalar region is modeled as well. auto *MiddleVPBB = cast<VPBasicBlock>(VectorRegion->getSingleSuccessor()); - BasicBlock *ExitBB = nullptr; VPBasicBlock *ScalarPHVPBB = nullptr; if (MiddleVPBB->getNumSuccessors() == 2) { // Order is strict: first is the exit block, second is the scalar preheader. - ExitBB = - cast<VPIRBasicBlock>(MiddleVPBB->getSuccessors()[0])->getIRBasicBlock(); ScalarPHVPBB = cast<VPBasicBlock>(MiddleVPBB->getSuccessors()[1]); - } else if (ExitingValuesToFix.empty()) { + } else if (ExitUsersToFix.empty()) { ScalarPHVPBB = cast<VPBasicBlock>(MiddleVPBB->getSingleSuccessor()); - } else { - ExitBB = cast<VPIRBasicBlock>(MiddleVPBB->getSingleSuccessor()) - ->getIRBasicBlock(); } if (!ScalarPHVPBB) { - assert(ExitingValuesToFix.empty() && + assert(ExitUsersToFix.empty() && "missed inserting extracts for exiting values"); return; } @@ -8827,24 +8826,17 @@ static void addLiveOutsForFirstOrderRecurrences( auto *FORPhi = cast<PHINode>(FOR->getUnderlyingInstr()); Plan.addLiveOut(FORPhi, ResumePhiRecipe); - // Now create VPLiveOuts for users in the exit block. - // Extract the penultimate value of the recurrence and add VPLiveOut - // users of the recurrence splice. - - // No edge from the middle block to the unique exit block has been inserted - // and there is nothing to fix from vector loop; phis should have incoming - // from scalar loop only. - if (ExitingValuesToFix.empty()) - continue; - for (User *U : FORPhi->users()) { - auto *UI = cast<Instruction>(U); - if (UI->getParent() != ExitBB) + // Now update VPIRInstructions modeling LCSSA phis in the exit block. + // Extract the penultimate value of the recurrence and use it as operand for + // the VPIRInstruction modeling the phi. 
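Why the recurrence users get the penultimate value (the TwoVPV operand below): a first-order recurrence phi holds the previous iteration's value, so in the final iteration it holds the element produced two positions from the end. A scalar C++ picture of that off-by-one:

#include <vector>

// Scalar form of a first-order recurrence: PhiVal models the header
// phi, Next models the recurrence update.
int recurrenceExitValue(const std::vector<int> &A, int Init) {
  int PhiVal = Init, Next = Init;
  for (int V : A) {
    PhiVal = Next; // value from the previous iteration
    Next = V;      // this iteration's update
  }
  // For A.size() >= 2 the exit phi's value is A[A.size() - 2], the
  // penultimate element; hence ExtractFromEnd with offset two.
  return PhiVal;
}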
+ for (VPIRInstruction *ExitIRI : ExitUsersToFix) { + if (ExitIRI->getOperand(0) != FOR) continue; VPValue *Ext = MiddleBuilder.createNaryOp( VPInstruction::ExtractFromEnd, {FOR->getBackedgeValue(), TwoVPV}, {}, "vector.recur.extract.for.phi"); - Plan.addLiveOut(cast<PHINode>(UI), Ext); - ExitingValuesToFix.erase(cast<PHINode>(UI)); + ExitIRI->setOperand(0, Ext); + ExitUsersToFix.remove(ExitIRI); } } } @@ -9006,11 +8998,10 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) { "VPBasicBlock"); RecipeBuilder.fixHeaderPhis(); - MapVector<PHINode *, VPValue *> ExitingValuesToFix = collectUsersInExitBlock( + SetVector<VPIRInstruction *> ExitUsersToFix = collectUsersInExitBlock( OrigLoop, RecipeBuilder, *Plan, Legal->getInductionVars()); - - addLiveOutsForFirstOrderRecurrences(*Plan, ExitingValuesToFix); - addUsersInExitBlock(*Plan, ExitingValuesToFix); + addLiveOutsForFirstOrderRecurrences(*Plan, ExitUsersToFix); + addUsersInExitBlock(*Plan, ExitUsersToFix); // --------------------------------------------------------------------------- // Transform initial VPlan: Apply previously taken decisions, in order, to @@ -10128,7 +10119,9 @@ bool LoopVectorizePass::processLoop(Loop *L) { // directly in VPlan. EpilogILV.setTripCount(MainILV.getTripCount()); for (auto &R : make_early_inc_range(*BestEpiPlan.getPreheader())) { - auto *ExpandR = cast<VPExpandSCEVRecipe>(&R); + auto *ExpandR = dyn_cast<VPExpandSCEVRecipe>(&R); + if (!ExpandR) + continue; auto *ExpandedVal = BestEpiPlan.getOrAddLiveIn( ExpandedSCEVs.find(ExpandR->getSCEV())->second); ExpandR->replaceAllUsesWith(ExpandedVal); diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index 5f2bf08..282bb8e 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -7481,7 +7481,16 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, PrevMaxBW), std::min<unsigned>(DL->getTypeSizeInBits(VL0->getType()), PrevMinBW)); - ExtraBitWidthNodes.insert(VectorizableTree.size() + 1); + } + TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, + ReuseShuffleIndices); + LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); + + TE->setOperandsInOrder(); + for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) + buildTree_rec(TE->getOperand(I), Depth + 1, {TE, I}); + if (ShuffleOrOp == Instruction::Trunc) { + ExtraBitWidthNodes.insert(getOperandEntry(TE, 0)->Idx); } else if (ShuffleOrOp == Instruction::SIToFP || ShuffleOrOp == Instruction::UIToFP) { unsigned NumSignBits = @@ -7492,15 +7501,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, } if (NumSignBits * 2 >= DL->getTypeSizeInBits(VL0->getOperand(0)->getType())) - ExtraBitWidthNodes.insert(VectorizableTree.size() + 1); + ExtraBitWidthNodes.insert(getOperandEntry(TE, 0)->Idx); } - TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, - ReuseShuffleIndices); - LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); - - TE->setOperandsInOrder(); - for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) - buildTree_rec(TE->getOperand(I), Depth + 1, {TE, I}); return; } case Instruction::ICmp: diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp index 3164686..a310756 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp @@ -55,6 +55,7 @@ using namespace llvm::VPlanPatternMatch; namespace llvm { extern 
cl::opt<bool> EnableVPlanNativePath; } +extern cl::opt<unsigned> ForceTargetInstructionCost; static cl::opt<bool> PrintVPlansInDotFormat( "vplan-print-in-dot-format", cl::Hidden, @@ -223,11 +224,9 @@ VPBasicBlock::iterator VPBasicBlock::getFirstNonPhi() { VPTransformState::VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI, DominatorTree *DT, IRBuilderBase &Builder, - InnerLoopVectorizer *ILV, VPlan *Plan, - LLVMContext &Ctx) + InnerLoopVectorizer *ILV, VPlan *Plan) : VF(VF), UF(UF), CFG(DT), LI(LI), Builder(Builder), ILV(ILV), Plan(Plan), - LVer(nullptr), - TypeAnalysis(Plan->getCanonicalIV()->getScalarType(), Ctx) {} + LVer(nullptr), TypeAnalysis(Plan->getCanonicalIV()->getScalarType()) {} Value *VPTransformState::get(VPValue *Def, const VPIteration &Instance) { if (Def->isLiveIn()) @@ -795,7 +794,9 @@ InstructionCost VPRegionBlock::cost(ElementCount VF, VPCostContext &Ctx) { for (VPBlockBase *Block : vp_depth_first_shallow(getEntry())) Cost += Block->cost(VF, Ctx); InstructionCost BackedgeCost = - Ctx.TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); + ForceTargetInstructionCost.getNumOccurrences() + ? InstructionCost(ForceTargetInstructionCost.getNumOccurrences()) + : Ctx.TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); LLVM_DEBUG(dbgs() << "Cost of " << BackedgeCost << " for VF " << VF << ": vector loop backedge\n"); Cost += BackedgeCost; @@ -860,10 +861,18 @@ VPlan::~VPlan() { delete BackedgeTakenCount; } +static VPIRBasicBlock *createVPIRBasicBlockFor(BasicBlock *BB) { + auto *VPIRBB = new VPIRBasicBlock(BB); + for (Instruction &I : + make_range(BB->begin(), BB->getTerminator()->getIterator())) + VPIRBB->appendRecipe(new VPIRInstruction(I)); + return VPIRBB; +} + VPlanPtr VPlan::createInitialVPlan(const SCEV *TripCount, ScalarEvolution &SE, bool RequiresScalarEpilogueCheck, bool TailFolded, Loop *TheLoop) { - VPIRBasicBlock *Entry = new VPIRBasicBlock(TheLoop->getLoopPreheader()); + VPIRBasicBlock *Entry = createVPIRBasicBlockFor(TheLoop->getLoopPreheader()); VPBasicBlock *VecPreheader = new VPBasicBlock("vector.ph"); auto Plan = std::make_unique<VPlan>(Entry, VecPreheader); Plan->TripCount = @@ -895,7 +904,7 @@ VPlanPtr VPlan::createInitialVPlan(const SCEV *TripCount, ScalarEvolution &SE, // we unconditionally branch to the scalar preheader. Do nothing. // 3) Otherwise, construct a runtime check. BasicBlock *IRExitBlock = TheLoop->getUniqueExitBlock(); - auto *VPExitBlock = new VPIRBasicBlock(IRExitBlock); + auto *VPExitBlock = createVPIRBasicBlockFor(IRExitBlock); // The connection order corresponds to the operands of the conditional branch. VPBlockUtils::insertBlockAfter(VPExitBlock, MiddleVPBB); VPBlockUtils::connectBlocks(MiddleVPBB, ScalarPH); @@ -968,13 +977,15 @@ void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV, } /// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p -/// VPBB are moved to the newly created VPIRBasicBlock. VPBB must have a single -/// predecessor, which is rewired to the new VPIRBasicBlock. All successors of -/// VPBB, if any, are rewired to the new VPIRBasicBlock. +/// VPBB are moved to the end of the newly created VPIRBasicBlock. VPBB must +/// have a single predecessor, which is rewired to the new VPIRBasicBlock. All +/// successors of VPBB, if any, are rewired to the new VPIRBasicBlock. 
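createVPIRBasicBlockFor, introduced in this hunk, wraps every instruction of the IR block except its terminator, since control flow stays modeled by the VPlan CFG rather than by wrapped branches. The half-open instruction range it iterates is a common LLVM idiom; a stand-alone equivalent:

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Count the instructions createVPIRBasicBlockFor would wrap: all of
// BB except its terminator.
unsigned countWrappableInsts(BasicBlock *BB) {
  unsigned N = 0;
  for (Instruction &I :
       make_range(BB->begin(), BB->getTerminator()->getIterator())) {
    (void)I; // a VPIRInstruction would be appended for each I here
    ++N;
  }
  return N;
}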
static void replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB) { - VPIRBasicBlock *IRMiddleVPBB = new VPIRBasicBlock(IRBB); - for (auto &R : make_early_inc_range(*VPBB)) + VPIRBasicBlock *IRMiddleVPBB = createVPIRBasicBlockFor(IRBB); + for (auto &R : make_early_inc_range(*VPBB)) { + assert(!R.isPhi() && "Tried to move phi recipe to end of block"); R.moveBefore(*IRMiddleVPBB, IRMiddleVPBB->end()); + } VPBlockBase *PredVPBB = VPBB->getSinglePredecessor(); VPBlockUtils::disconnectBlocks(PredVPBB, VPBB); VPBlockUtils::connectBlocks(PredVPBB, IRMiddleVPBB); diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 64242e4..eac4fe8 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -255,7 +255,7 @@ struct VPIteration { struct VPTransformState { VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI, DominatorTree *DT, IRBuilderBase &Builder, - InnerLoopVectorizer *ILV, VPlan *Plan, LLVMContext &Ctx); + InnerLoopVectorizer *ILV, VPlan *Plan); /// The chosen Vectorization and Unroll Factors of the loop being vectorized. ElementCount VF; @@ -743,9 +743,9 @@ struct VPCostContext { SmallPtrSet<Instruction *, 8> SkipCostComputation; VPCostContext(const TargetTransformInfo &TTI, const TargetLibraryInfo &TLI, - Type *CanIVTy, LLVMContext &LLVMCtx, - LoopVectorizationCostModel &CM) - : TTI(TTI), TLI(TLI), Types(CanIVTy, LLVMCtx), LLVMCtx(LLVMCtx), CM(CM) {} + Type *CanIVTy, LoopVectorizationCostModel &CM) + : TTI(TTI), TLI(TLI), Types(CanIVTy), LLVMCtx(CanIVTy->getContext()), + CM(CM) {} /// Return the cost for \p UI with \p VF using the legacy cost model as /// fallback until computing the cost of all recipes migrates to VPlan. @@ -936,8 +936,9 @@ public: case VPRecipeBase::VPReductionPHISC: case VPRecipeBase::VPScalarCastSC: return true; - case VPRecipeBase::VPInterleaveSC: case VPRecipeBase::VPBranchOnMaskSC: + case VPRecipeBase::VPInterleaveSC: + case VPRecipeBase::VPIRInstructionSC: case VPRecipeBase::VPWidenLoadEVLSC: case VPRecipeBase::VPWidenLoadSC: case VPRecipeBase::VPWidenStoreEVLSC: @@ -1405,6 +1406,45 @@ public: bool isSingleScalar() const; }; +/// A recipe to wrap an original IR instruction not to be modified during +/// execution, except for PHIs. For PHIs, a single VPValue operand is allowed, +/// and it is used to add a new incoming value for the single predecessor VPBB. +/// Except for PHIs, VPIRInstructions cannot have any operands. +class VPIRInstruction : public VPRecipeBase { + Instruction &I; + +public: + VPIRInstruction(Instruction &I) + : VPRecipeBase(VPDef::VPIRInstructionSC, ArrayRef<VPValue *>()), I(I) {} + + ~VPIRInstruction() override = default; + + VP_CLASSOF_IMPL(VPDef::VPIRInstructionSC) + + VPIRInstruction *clone() override { + auto *R = new VPIRInstruction(I); + for (auto *Op : operands()) + R->addOperand(Op); + return R; + } + + void execute(VPTransformState &State) override; + + Instruction &getInstruction() { return I; } + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) + /// Print the recipe. + void print(raw_ostream &O, const Twine &Indent, + VPSlotTracker &SlotTracker) const override; +#endif + + bool usesScalars(const VPValue *Op) const override { + assert(is_contained(operands(), Op) && + "Op must be an operand of the recipe"); + return true; + } +}; + /// VPWidenRecipe is a recipe for producing a widened instruction using the /// opcode and operands of the recipe.
This recipe covers most of the /// traditional vectorization cases where each recipe transforms into a diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.h b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.h index 438364e..cc21870 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.h +++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.h @@ -11,6 +11,7 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" +#include "llvm/IR/Type.h" namespace llvm { @@ -54,8 +55,8 @@ class VPTypeAnalysis { Type *inferScalarTypeForRecipe(const VPReplicateRecipe *R); public: - VPTypeAnalysis(Type *CanonicalIVTy, LLVMContext &Ctx) - : CanonicalIVTy(CanonicalIVTy), Ctx(Ctx) {} + VPTypeAnalysis(Type *CanonicalIVTy) + : CanonicalIVTy(CanonicalIVTy), Ctx(CanonicalIVTy->getContext()) {} /// Infer the type of \p V. Returns the scalar type of \p V. Type *inferScalarType(const VPValue *V); diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index 351f909..9068ccf 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -867,6 +867,43 @@ void VPInstruction::print(raw_ostream &O, const Twine &Indent, } #endif +void VPIRInstruction::execute(VPTransformState &State) { + assert((isa<PHINode>(&I) || getNumOperands() == 0) && + "Only PHINodes can have extra operands"); + if (getNumOperands() == 1) { + VPValue *ExitValue = getOperand(0); + auto Lane = vputils::isUniformAfterVectorization(ExitValue) + ? VPLane::getFirstLane() + : VPLane::getLastLaneForVF(State.VF); + auto *PredVPBB = cast<VPBasicBlock>(getParent()->getSinglePredecessor()); + BasicBlock *PredBB = State.CFG.VPBB2IRBB[PredVPBB]; + // Set insertion point in PredBB in case an extract needs to be generated. + // TODO: Model extracts explicitly. + State.Builder.SetInsertPoint(PredBB, PredBB->getFirstNonPHIIt()); + Value *V = State.get(ExitValue, VPIteration(State.UF - 1, Lane)); + auto *Phi = cast<PHINode>(&I); + Phi->addIncoming(V, PredBB); + } + + // Advance the insert point after the wrapped IR instruction. This allows + // interleaving VPIRInstructions and other recipes. + State.Builder.SetInsertPoint(I.getParent(), std::next(I.getIterator())); +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +void VPIRInstruction::print(raw_ostream &O, const Twine &Indent, + VPSlotTracker &SlotTracker) const { + O << Indent << "IR " << I; + + if (getNumOperands() != 0) { + assert(getNumOperands() == 1 && "can have at most 1 operand"); + O << " (extra operand: "; + printOperands(O, SlotTracker); + O << ")"; + } +} +#endif + void VPWidenCallRecipe::execute(VPTransformState &State) { assert(State.VF.isVector() && "not widening"); Function *CalledScalarFn = getCalledScalarFunction(); diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index e104efd..1d84550 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -537,7 +537,7 @@ createScalarIVSteps(VPlan &Plan, InductionDescriptor::InductionKind Kind, // Truncate base induction if needed. 
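Stepping back to VPIRInstruction::execute above: for a wrapped phi it reads the exit value (first lane if uniform after vectorization, otherwise the last lane of the last unrolled part) and then attaches it as a new incoming value from the predecessor block. That final wiring is the ordinary PHINode API; a sketch with invented names:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: give an existing LCSSA phi an extra edge from the vector
// loop's middle block, carrying the extracted exit value.
void wireExitPhi(PHINode *Phi, Value *ExitV, BasicBlock *MiddleBB) {
  Phi->addIncoming(ExitV, MiddleBB);
}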
Type *CanonicalIVType = CanonicalIV->getScalarType(); - VPTypeAnalysis TypeInfo(CanonicalIVType, CanonicalIVType->getContext()); + VPTypeAnalysis TypeInfo(CanonicalIVType); Type *ResultTy = TypeInfo.inferScalarType(BaseIV); if (TruncI) { Type *TruncTy = TruncI->getType(); @@ -940,8 +940,7 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) { // Verify that the cached type info for both A and its users is still // accurate by comparing it to freshly computed types. VPTypeAnalysis TypeInfo2( - R.getParent()->getPlan()->getCanonicalIV()->getScalarType(), - TypeInfo.getContext()); + R.getParent()->getPlan()->getCanonicalIV()->getScalarType()); assert(TypeInfo.inferScalarType(A) == TypeInfo2.inferScalarType(A)); for (VPUser *U : A->users()) { auto *R = dyn_cast<VPRecipeBase>(U); @@ -976,7 +975,7 @@ static void simplifyRecipes(VPlan &Plan) { ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT( Plan.getEntry()); Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType(); - VPTypeAnalysis TypeInfo(CanonicalIVType, CanonicalIVType->getContext()); + VPTypeAnalysis TypeInfo(CanonicalIVType); for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) { for (VPRecipeBase &R : make_early_inc_range(*VPBB)) { simplifyRecipe(R, TypeInfo); @@ -997,8 +996,7 @@ void VPlanTransforms::truncateToMinimalBitwidths( // typed. DenseMap<VPValue *, VPWidenCastRecipe *> ProcessedTruncs; Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType(); - LLVMContext &Ctx = CanonicalIVType->getContext(); - VPTypeAnalysis TypeInfo(CanonicalIVType, Ctx); + VPTypeAnalysis TypeInfo(CanonicalIVType); VPBasicBlock *PH = Plan.getEntry(); for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>( vp_depth_first_deep(Plan.getVectorLoopRegion()))) { @@ -1052,6 +1050,7 @@ void VPlanTransforms::truncateToMinimalBitwidths( assert(OldResTy->isIntegerTy() && "only integer types supported"); (void)OldResSizeInBits; + LLVMContext &Ctx = CanonicalIVType->getContext(); auto *NewResTy = IntegerType::get(Ctx, NewResSizeInBits); // Any wrapping introduced by shrinking this operation shouldn't be diff --git a/llvm/lib/Transforms/Vectorize/VPlanValue.h b/llvm/lib/Transforms/Vectorize/VPlanValue.h index b8b2c0b..1dd8d09 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanValue.h +++ b/llvm/lib/Transforms/Vectorize/VPlanValue.h @@ -339,6 +339,7 @@ public: VPBranchOnMaskSC, VPDerivedIVSC, VPExpandSCEVSC, + VPIRInstructionSC, VPInstructionSC, VPInterleaveSC, VPReductionEVLSC, diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp index dfddb5b4..99bc4c3 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp @@ -18,6 +18,7 @@ #include "VPlanDominatorTree.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/CommandLine.h" #define DEBUG_TYPE "loop-vectorize" @@ -35,6 +36,11 @@ class VPlanVerifier { // VPHeaderPHIRecipes. bool verifyPhiRecipes(const VPBasicBlock *VPBB); + /// Verify that \p EVL is used correctly. Each user must either be an + /// EVL-based recipe that takes \p EVL as its last operand, or a + /// VPInstruction::Add whose result is an incoming value of \p EVL's recipe.
+ bool verifyEVLRecipe(const VPInstruction &EVL) const; + bool verifyVPBasicBlock(const VPBasicBlock *VPBB); bool verifyBlock(const VPBlockBase *VPB); @@ -114,6 +120,67 @@ bool VPlanVerifier::verifyPhiRecipes(const VPBasicBlock *VPBB) { return true; } +bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const { + if (EVL.getOpcode() != VPInstruction::ExplicitVectorLength) { + errs() << "verifyEVLRecipe should only be called on " + "VPInstruction::ExplicitVectorLength\n"; + return false; + } + auto VerifyEVLUse = [&](const VPRecipeBase &R, + const unsigned ExpectedIdx) -> bool { + SmallVector<const VPValue *> Ops(R.operands()); + unsigned UseCount = count(Ops, &EVL); + if (UseCount != 1 || Ops[ExpectedIdx] != &EVL) { + errs() << "EVL is used as non-last operand in EVL-based recipe\n"; + return false; + } + return true; + }; + for (const VPUser *U : EVL.users()) { + if (!TypeSwitch<const VPUser *, bool>(U) + .Case<VPWidenStoreEVLRecipe>([&](const VPWidenStoreEVLRecipe *S) { + return VerifyEVLUse(*S, 2); + }) + .Case<VPWidenLoadEVLRecipe>([&](const VPWidenLoadEVLRecipe *L) { + return VerifyEVLUse(*L, 1); + }) + .Case<VPWidenEVLRecipe>([&](const VPWidenEVLRecipe *W) { + return VerifyEVLUse( + *W, Instruction::isUnaryOp(W->getOpcode()) ? 1 : 2); + }) + .Case<VPReductionEVLRecipe>([&](const VPReductionEVLRecipe *R) { + return VerifyEVLUse(*R, 2); + }) + .Case<VPScalarCastRecipe>( + [&](const VPScalarCastRecipe *S) { return true; }) + .Case<VPInstruction>([&](const VPInstruction *I) { + if (I->getOpcode() != Instruction::Add) { + errs() + << "EVL is used as an operand in non-VPInstruction::Add\n"; + return false; + } + if (I->getNumUsers() != 1) { + errs() << "EVL is used in VPInstruction:Add with multiple " + "users\n"; + return false; + } + if (!isa<VPEVLBasedIVPHIRecipe>(*I->users().begin())) { + errs() << "Result of VPInstruction::Add with EVL operand is " + "not used by VPEVLBasedIVPHIRecipe\n"; + return false; + } + return true; + }) + .Default([&](const VPUser *U) { + errs() << "EVL has unexpected user\n"; + return false; + })) { + return false; + } + } + return true; +} + bool VPlanVerifier::verifyVPBasicBlock(const VPBasicBlock *VPBB) { if (!verifyPhiRecipes(VPBB)) return false; @@ -126,6 +193,15 @@ bool VPlanVerifier::verifyVPBasicBlock(const VPBasicBlock *VPBB) { RecipeNumbering[&R] = Cnt++; for (const VPRecipeBase &R : *VPBB) { + if (isa<VPIRInstruction>(&R) ^ isa<VPIRBasicBlock>(VPBB)) { + errs() << "VPIRInstructions "; +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) + R.dump(); + errs() << " "; +#endif + errs() << "not in a VPIRBasicBlock!\n"; + return false; + } for (const VPValue *V : R.definedValues()) { for (const VPUser *U : V->users()) { auto *UI = dyn_cast<VPRecipeBase>(U); @@ -150,6 +226,13 @@ bool VPlanVerifier::verifyVPBasicBlock(const VPBasicBlock *VPBB) { } } } + if (const auto *EVL = dyn_cast<VPInstruction>(&R)) { + if (EVL->getOpcode() == VPInstruction::ExplicitVectorLength && + !verifyEVLRecipe(*EVL)) { + errs() << "EVL VPValue is not used correctly\n"; + return false; + } + } } auto *IRBB = dyn_cast<VPIRBasicBlock>(VPBB); diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp index d7afe2f..58701bfa 100644 --- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp +++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp @@ -2597,11 +2597,19 @@ bool VectorCombine::shrinkType(llvm::Instruction &I) { auto *SmallTy = cast<FixedVectorType>(ZExted->getType()); unsigned BW = 
SmallTy->getElementType()->getPrimitiveSizeInBits(); - // Check that the expression overall uses at most the same number of bits as - // ZExted - KnownBits KB = computeKnownBits(&I, *DL); - if (KB.countMaxActiveBits() > BW) - return false; + if (I.getOpcode() == Instruction::LShr) { + // Check that the shift amount is less than the number of bits in the + // smaller type. Otherwise, the smaller lshr will return a poison value. + KnownBits ShAmtKB = computeKnownBits(I.getOperand(1), *DL); + if (ShAmtKB.getMaxValue().uge(BW)) + return false; + } else { + // Check that the expression overall uses at most the same number of bits as + // ZExted + KnownBits KB = computeKnownBits(&I, *DL); + if (KB.countMaxActiveBits() > BW) + return false; + } // Calculate costs of leaving current IR as it is and moving ZExt operation // later, along with adding truncates if needed @@ -2628,7 +2636,7 @@ bool VectorCombine::shrinkType(llvm::Instruction &I) { return false; // Check if we can propagate ZExt through its other users - KB = computeKnownBits(UI, *DL); + KnownBits KB = computeKnownBits(UI, *DL); if (KB.countMaxActiveBits() > BW) return false; diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir index 62d98a2..ee3087a 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir @@ -544,6 +544,13 @@ # DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}} # DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected # DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected +# DEBUG-NEXT: G_FPTOSI_SAT (opcode {{[0-9]+}}): 2 type indices, 0 imm indices +# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected +# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected +# DEBUG-NEXT: G_FPTOUI_SAT (opcode {{[0-9]+}}): 2 type indices, 0 imm indices +# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}} +# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected +# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected # DEBUG-NEXT: G_FABS (opcode {{[0-9]+}}): 1 type index, 0 imm indices # DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected # DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected diff --git a/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll b/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll index 1a60f87..4fb0c27 100644 --- a/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll +++ b/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll @@ -354,11 +354,10 @@ define dso_local i32 @test_store_release_i64(i32, i64 %val, ptr %addr) { } ; The stxp result cannot be allocated to the same register as the inputs. -; FIXME: This is a miscompile. 
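Back on the VectorCombine::shrinkType guard a few hunks up: an lshr whose shift amount can reach the narrow type's bit width would turn into poison once shrunk, so the transform now bails out unless the shift amount is provably smaller. A plain C++ sketch of the same bound check (the 8-bit width mirrors shrinking to an i8 element type; values are illustrative):

#include <cstdint>
#include <optional>

// Shrinking an lshr to 8 bits is only sound when the shift amount is
// provably < 8; otherwise the narrow shift would be poison in IR.
std::optional<uint8_t> shrunkLshr(uint8_t X, uint32_t MaxShAmt) {
  if (MaxShAmt >= 8) // mirrors ShAmtKB.getMaxValue().uge(BW)
    return std::nullopt; // bail out and keep the wide shift
  return static_cast<uint8_t>(X >> MaxShAmt);
}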
define dso_local i32 @test_stxp_undef(ptr %p, i64 %x) nounwind { ; CHECK-LABEL: test_stxp_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: stxp w8, x8, x1, [x0] +; CHECK-NEXT: stxp w8, x9, x1, [x0] ; CHECK-NEXT: mov w0, w8 ; CHECK-NEXT: ret %res = call i32 @llvm.aarch64.stxp(i64 undef, i64 %x, ptr %p) diff --git a/llvm/test/CodeGen/AArch64/ctlo.ll b/llvm/test/CodeGen/AArch64/ctlo.ll new file mode 100644 index 0000000..e047545 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/ctlo.ll @@ -0,0 +1,132 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s --mtriple=aarch64 -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc < %s --mtriple=aarch64 -global-isel -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,CHECK-GI + +declare i8 @llvm.ctlz.i8(i8, i1) +declare i16 @llvm.ctlz.i16(i16, i1) +declare i32 @llvm.ctlz.i32(i32, i1) +declare i64 @llvm.ctlz.i64(i64, i1) + +define i8 @ctlo_i8(i8 %x) { +; CHECK-SD-LABEL: ctlo_i8: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov w8, #-1 // =0xffffffff +; CHECK-SD-NEXT: eor w8, w8, w0, lsl #24 +; CHECK-SD-NEXT: clz w0, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: ctlo_i8: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #255 // =0xff +; CHECK-GI-NEXT: bic w8, w8, w0 +; CHECK-GI-NEXT: clz w8, w8 +; CHECK-GI-NEXT: sub w0, w8, #24 +; CHECK-GI-NEXT: ret + %tmp1 = xor i8 %x, -1 + %tmp2 = call i8 @llvm.ctlz.i8( i8 %tmp1, i1 false ) + ret i8 %tmp2 +} + +define i8 @ctlo_i8_undef(i8 %x) { +; CHECK-SD-LABEL: ctlo_i8_undef: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mvn w8, w0 +; CHECK-SD-NEXT: lsl w8, w8, #24 +; CHECK-SD-NEXT: clz w0, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: ctlo_i8_undef: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #255 // =0xff +; CHECK-GI-NEXT: bic w8, w8, w0 +; CHECK-GI-NEXT: clz w8, w8 +; CHECK-GI-NEXT: sub w0, w8, #24 +; CHECK-GI-NEXT: ret + %tmp1 = xor i8 %x, -1 + %tmp2 = call i8 @llvm.ctlz.i8( i8 %tmp1, i1 true ) + ret i8 %tmp2 +} + +define i16 @ctlo_i16(i16 %x) { +; CHECK-SD-LABEL: ctlo_i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov w8, #-1 // =0xffffffff +; CHECK-SD-NEXT: eor w8, w8, w0, lsl #16 +; CHECK-SD-NEXT: clz w0, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: ctlo_i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #65535 // =0xffff +; CHECK-GI-NEXT: bic w8, w8, w0 +; CHECK-GI-NEXT: clz w8, w8 +; CHECK-GI-NEXT: sub w0, w8, #16 +; CHECK-GI-NEXT: ret + %tmp1 = xor i16 %x, -1 + %tmp2 = call i16 @llvm.ctlz.i16( i16 %tmp1, i1 false ) + ret i16 %tmp2 +} + +define i16 @ctlo_i16_undef(i16 %x) { +; CHECK-SD-LABEL: ctlo_i16_undef: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mvn w8, w0 +; CHECK-SD-NEXT: lsl w8, w8, #16 +; CHECK-SD-NEXT: clz w0, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: ctlo_i16_undef: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #65535 // =0xffff +; CHECK-GI-NEXT: bic w8, w8, w0 +; CHECK-GI-NEXT: clz w8, w8 +; CHECK-GI-NEXT: sub w0, w8, #16 +; CHECK-GI-NEXT: ret + %tmp1 = xor i16 %x, -1 + %tmp2 = call i16 @llvm.ctlz.i16( i16 %tmp1, i1 true ) + ret i16 %tmp2 +} + +define i32 @ctlo_i32(i32 %x) { +; CHECK-LABEL: ctlo_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: mvn w8, w0 +; CHECK-NEXT: clz w0, w8 +; CHECK-NEXT: ret + %tmp1 = xor i32 %x, -1 + %tmp2 = call i32 @llvm.ctlz.i32( i32 %tmp1, i1 false ) + ret i32 %tmp2 +} + +define i32 @ctlo_i32_undef(i32 %x) { +; CHECK-LABEL: ctlo_i32_undef: +; CHECK: // %bb.0: +; CHECK-NEXT: mvn w8, w0 +; CHECK-NEXT: clz w0, w8 +; CHECK-NEXT: ret + %tmp1 = xor i32 %x, -1 + %tmp2 = call i32 @llvm.ctlz.i32( i32 %tmp1, 
i1 true ) + ret i32 %tmp2 +} + +define i64 @ctlo_i64(i64 %x) { +; CHECK-LABEL: ctlo_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mvn x8, x0 +; CHECK-NEXT: clz x0, x8 +; CHECK-NEXT: ret + %tmp1 = xor i64 %x, -1 + %tmp2 = call i64 @llvm.ctlz.i64( i64 %tmp1, i1 false ) + ret i64 %tmp2 +} + +define i64 @ctlo_i64_undef(i64 %x) { +; CHECK-LABEL: ctlo_i64_undef: +; CHECK: // %bb.0: +; CHECK-NEXT: mvn x8, x0 +; CHECK-NEXT: clz x0, x8 +; CHECK-NEXT: ret + %tmp1 = xor i64 %x, -1 + %tmp2 = call i64 @llvm.ctlz.i64( i64 %tmp1, i1 true ) + ret i64 %tmp2 +} diff --git a/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll index eeb1504d..9c52b02 100644 --- a/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll +++ b/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT -; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16 +; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-CVT +; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16 +; RUN: llc < %s -mtriple=aarch64 -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-CVT +; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16 ; ; 32-bit float to signed integer @@ -18,13 +20,23 @@ declare i100 @llvm.fptosi.sat.i100.f32(float) declare i128 @llvm.fptosi.sat.i128.f32(float) define i1 @test_signed_i1_f32(float %f) nounwind { -; CHECK-LABEL: test_signed_i1_f32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzs w8, s0 -; CHECK-NEXT: ands w8, w8, w8, asr #31 -; CHECK-NEXT: csinv w8, w8, wzr, ge -; CHECK-NEXT: and w0, w8, #0x1 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_i1_f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzs w8, s0 +; CHECK-SD-NEXT: ands w8, w8, w8, asr #31 +; CHECK-SD-NEXT: csinv w8, w8, wzr, ge +; CHECK-SD-NEXT: and w0, w8, #0x1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_i1_f32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs w8, s0 +; CHECK-GI-NEXT: cmp w8, #0 +; CHECK-GI-NEXT: csel w8, w8, wzr, lt +; CHECK-GI-NEXT: cmp w8, #0 +; CHECK-GI-NEXT: csinv w8, w8, wzr, ge +; CHECK-GI-NEXT: and w0, w8, #0x1 +; CHECK-GI-NEXT: ret %x = call i1 @llvm.fptosi.sat.i1.f32(float %f) ret i1 %x } @@ -99,16 +111,27 @@ define i32 @test_signed_i32_f32(float %f) nounwind { } define i50 @test_signed_i50_f32(float %f) nounwind { -; CHECK-LABEL: test_signed_i50_f32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzs x8, s0 -; CHECK-NEXT: mov x9, #562949953421311 // =0x1ffffffffffff -; CHECK-NEXT: cmp x8, x9 -; CHECK-NEXT: csel x8, x8, x9, lt -; CHECK-NEXT: mov x9, #-562949953421312 // =0xfffe000000000000 -; CHECK-NEXT: cmp x8, x9 -; CHECK-NEXT: csel x0, x8, x9, gt -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_i50_f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzs x8, s0 +; CHECK-SD-NEXT: mov x9, #562949953421311 // =0x1ffffffffffff +; CHECK-SD-NEXT: cmp x8, x9 +; CHECK-SD-NEXT: csel x8, x8, x9, lt +; CHECK-SD-NEXT: mov x9, #-562949953421312 // =0xfffe000000000000 +; CHECK-SD-NEXT: cmp x8, x9 +; CHECK-SD-NEXT: csel x0, x8, x9, gt +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_i50_f32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs x8, s0 +; CHECK-GI-NEXT: mov x9, #562949953421311 
// =0x1ffffffffffff +; CHECK-GI-NEXT: mov x10, #-562949953421312 // =0xfffe000000000000 +; CHECK-GI-NEXT: cmp x8, x9 +; CHECK-GI-NEXT: csel x8, x8, x9, lt +; CHECK-GI-NEXT: cmp x8, x10 +; CHECK-GI-NEXT: csel x0, x8, x10, gt +; CHECK-GI-NEXT: ret %x = call i50 @llvm.fptosi.sat.i50.f32(float %f) ret i50 %x } @@ -123,57 +146,105 @@ define i64 @test_signed_i64_f32(float %f) nounwind { } define i100 @test_signed_i100_f32(float %f) nounwind { -; CHECK-LABEL: test_signed_i100_f32: -; CHECK: // %bb.0: -; CHECK-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #8] // 8-byte Folded Spill -; CHECK-NEXT: fmov s8, s0 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: movi v0.2s, #241, lsl #24 -; CHECK-NEXT: mov w8, #1895825407 // =0x70ffffff -; CHECK-NEXT: mov x10, #34359738367 // =0x7ffffffff -; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload -; CHECK-NEXT: fcmp s8, s0 -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov x8, #-34359738368 // =0xfffffff800000000 -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: csel x8, x8, x1, lt -; CHECK-NEXT: fcmp s8, s0 -; CHECK-NEXT: csel x8, x10, x8, gt -; CHECK-NEXT: csinv x9, x9, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: csel x0, xzr, x9, vs -; CHECK-NEXT: csel x1, xzr, x8, vs -; CHECK-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_i100_f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill +; CHECK-SD-NEXT: str x30, [sp, #8] // 8-byte Folded Spill +; CHECK-SD-NEXT: fmov s8, s0 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: movi v0.2s, #241, lsl #24 +; CHECK-SD-NEXT: mov w8, #1895825407 // =0x70ffffff +; CHECK-SD-NEXT: mov x10, #34359738367 // =0x7ffffffff +; CHECK-SD-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload +; CHECK-SD-NEXT: fcmp s8, s0 +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov x8, #-34359738368 // =0xfffffff800000000 +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: csel x8, x8, x1, lt +; CHECK-SD-NEXT: fcmp s8, s0 +; CHECK-SD-NEXT: csel x8, x10, x8, gt +; CHECK-SD-NEXT: csinv x9, x9, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: csel x0, xzr, x9, vs +; CHECK-SD-NEXT: csel x1, xzr, x8, vs +; CHECK-SD-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_i100_f32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #8] // 8-byte Folded Spill +; CHECK-GI-NEXT: fmov s8, s0 +; CHECK-GI-NEXT: bl __fixsfti +; CHECK-GI-NEXT: movi v0.2s, #241, lsl #24 +; CHECK-GI-NEXT: mov w8, #1895825407 // =0x70ffffff +; CHECK-GI-NEXT: mov x10, #34359738367 // =0x7ffffffff +; CHECK-GI-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload +; CHECK-GI-NEXT: fcmp s8, s0 +; CHECK-GI-NEXT: fmov s0, w8 +; CHECK-GI-NEXT: mov x8, #34359738368 // =0x800000000 +; CHECK-GI-NEXT: csel x9, xzr, x0, lt +; CHECK-GI-NEXT: csel x8, x8, x1, lt +; CHECK-GI-NEXT: fcmp s8, s0 +; CHECK-GI-NEXT: csinv x9, x9, xzr, le +; CHECK-GI-NEXT: csel x8, x10, x8, gt +; CHECK-GI-NEXT: fcmp s8, s8 +; CHECK-GI-NEXT: csel x0, xzr, x9, vs +; CHECK-GI-NEXT: csel x1, xzr, x8, vs +; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload +; CHECK-GI-NEXT: ret %x = call i100 @llvm.fptosi.sat.i100.f32(float %f) ret i100 %x } define i128 @test_signed_i128_f32(float %f) nounwind { -; CHECK-LABEL: test_signed_i128_f32: -; CHECK: // %bb.0: -; CHECK-NEXT: str d8, [sp, #-16]! 
// 8-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #8] // 8-byte Folded Spill -; CHECK-NEXT: fmov s8, s0 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: movi v0.2s, #255, lsl #24 -; CHECK-NEXT: mov w8, #2130706431 // =0x7effffff -; CHECK-NEXT: mov x10, #9223372036854775807 // =0x7fffffffffffffff -; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload -; CHECK-NEXT: fcmp s8, s0 -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000 -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: csel x8, x8, x1, lt -; CHECK-NEXT: fcmp s8, s0 -; CHECK-NEXT: csel x8, x10, x8, gt -; CHECK-NEXT: csinv x9, x9, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: csel x0, xzr, x9, vs -; CHECK-NEXT: csel x1, xzr, x8, vs -; CHECK-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_i128_f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill +; CHECK-SD-NEXT: str x30, [sp, #8] // 8-byte Folded Spill +; CHECK-SD-NEXT: fmov s8, s0 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: movi v0.2s, #255, lsl #24 +; CHECK-SD-NEXT: mov w8, #2130706431 // =0x7effffff +; CHECK-SD-NEXT: mov x10, #9223372036854775807 // =0x7fffffffffffffff +; CHECK-SD-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload +; CHECK-SD-NEXT: fcmp s8, s0 +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000 +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: csel x8, x8, x1, lt +; CHECK-SD-NEXT: fcmp s8, s0 +; CHECK-SD-NEXT: csel x8, x10, x8, gt +; CHECK-SD-NEXT: csinv x9, x9, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: csel x0, xzr, x9, vs +; CHECK-SD-NEXT: csel x1, xzr, x8, vs +; CHECK-SD-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_i128_f32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: str d8, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #8] // 8-byte Folded Spill +; CHECK-GI-NEXT: fmov s8, s0 +; CHECK-GI-NEXT: bl __fixsfti +; CHECK-GI-NEXT: movi v0.2s, #255, lsl #24 +; CHECK-GI-NEXT: mov w8, #2130706431 // =0x7effffff +; CHECK-GI-NEXT: mov x10, #9223372036854775807 // =0x7fffffffffffffff +; CHECK-GI-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload +; CHECK-GI-NEXT: fcmp s8, s0 +; CHECK-GI-NEXT: fmov s0, w8 +; CHECK-GI-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000 +; CHECK-GI-NEXT: csel x9, xzr, x0, lt +; CHECK-GI-NEXT: csel x8, x8, x1, lt +; CHECK-GI-NEXT: fcmp s8, s0 +; CHECK-GI-NEXT: csinv x9, x9, xzr, le +; CHECK-GI-NEXT: csel x8, x10, x8, gt +; CHECK-GI-NEXT: fcmp s8, s8 +; CHECK-GI-NEXT: csel x0, xzr, x9, vs +; CHECK-GI-NEXT: csel x1, xzr, x8, vs +; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload +; CHECK-GI-NEXT: ret %x = call i128 @llvm.fptosi.sat.i128.f32(float %f) ret i128 %x } @@ -194,13 +265,23 @@ declare i100 @llvm.fptosi.sat.i100.f64(double) declare i128 @llvm.fptosi.sat.i128.f64(double) define i1 @test_signed_i1_f64(double %f) nounwind { -; CHECK-LABEL: test_signed_i1_f64: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzs w8, d0 -; CHECK-NEXT: ands w8, w8, w8, asr #31 -; CHECK-NEXT: csinv w8, w8, wzr, ge -; CHECK-NEXT: and w0, w8, #0x1 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_i1_f64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzs w8, d0 +; CHECK-SD-NEXT: ands w8, w8, w8, asr #31 +; CHECK-SD-NEXT: csinv w8, w8, wzr, ge +; CHECK-SD-NEXT: and w0, w8, #0x1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_i1_f64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs w8, d0 +; CHECK-GI-NEXT: cmp w8, #0 +; CHECK-GI-NEXT: csel w8, w8, wzr, lt +; CHECK-GI-NEXT: cmp w8, #0 +; CHECK-GI-NEXT: csinv w8, w8, wzr, ge +; CHECK-GI-NEXT: and w0, w8, #0x1 +; CHECK-GI-NEXT: ret %x = call i1 @llvm.fptosi.sat.i1.f64(double %f) ret i1 %x } @@ -275,16 +356,27 @@ define i32 @test_signed_i32_f64(double %f) nounwind { } define i50 @test_signed_i50_f64(double %f) nounwind { -; CHECK-LABEL: test_signed_i50_f64: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzs x8, d0 -; CHECK-NEXT: mov x9, #562949953421311 // =0x1ffffffffffff -; CHECK-NEXT: cmp x8, x9 -; CHECK-NEXT: csel x8, x8, x9, lt -; CHECK-NEXT: mov x9, #-562949953421312 // =0xfffe000000000000 -; CHECK-NEXT: cmp x8, x9 -; CHECK-NEXT: csel x0, x8, x9, gt -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_i50_f64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzs x8, d0 +; CHECK-SD-NEXT: mov x9, #562949953421311 // =0x1ffffffffffff +; CHECK-SD-NEXT: cmp x8, x9 +; CHECK-SD-NEXT: csel x8, x8, x9, lt +; CHECK-SD-NEXT: mov x9, #-562949953421312 // =0xfffe000000000000 +; CHECK-SD-NEXT: cmp x8, x9 +; CHECK-SD-NEXT: csel x0, x8, x9, gt +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_i50_f64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs x8, d0 +; CHECK-GI-NEXT: mov x9, #562949953421311 // =0x1ffffffffffff +; CHECK-GI-NEXT: mov x10, #-562949953421312 // =0xfffe000000000000 +; CHECK-GI-NEXT: cmp x8, x9 +; CHECK-GI-NEXT: csel x8, x8, x9, lt +; CHECK-GI-NEXT: cmp x8, x10 +; CHECK-GI-NEXT: csel x0, x8, x10, gt +; CHECK-GI-NEXT: ret %x = call i50 @llvm.fptosi.sat.i50.f64(double %f) ret i50 %x } @@ -299,59 +391,109 @@ define i64 @test_signed_i64_f64(double %f) nounwind { } define i100 @test_signed_i100_f64(double %f) nounwind { -; CHECK-LABEL: test_signed_i100_f64: -; CHECK: // %bb.0: -; CHECK-NEXT: str d8, [sp, #-16]! 
// 8-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #8] // 8-byte Folded Spill -; CHECK-NEXT: fmov d8, d0 -; CHECK-NEXT: bl __fixdfti -; CHECK-NEXT: mov x8, #-4170333254945079296 // =0xc620000000000000 -; CHECK-NEXT: mov x10, #34359738367 // =0x7ffffffff -; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload -; CHECK-NEXT: fmov d0, x8 -; CHECK-NEXT: mov x8, #5053038781909696511 // =0x461fffffffffffff -; CHECK-NEXT: fcmp d8, d0 -; CHECK-NEXT: fmov d0, x8 -; CHECK-NEXT: mov x8, #-34359738368 // =0xfffffff800000000 -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: csel x8, x8, x1, lt -; CHECK-NEXT: fcmp d8, d0 -; CHECK-NEXT: csel x8, x10, x8, gt -; CHECK-NEXT: csinv x9, x9, xzr, le -; CHECK-NEXT: fcmp d8, d8 -; CHECK-NEXT: csel x0, xzr, x9, vs -; CHECK-NEXT: csel x1, xzr, x8, vs -; CHECK-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_i100_f64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill +; CHECK-SD-NEXT: str x30, [sp, #8] // 8-byte Folded Spill +; CHECK-SD-NEXT: fmov d8, d0 +; CHECK-SD-NEXT: bl __fixdfti +; CHECK-SD-NEXT: mov x8, #-4170333254945079296 // =0xc620000000000000 +; CHECK-SD-NEXT: mov x10, #34359738367 // =0x7ffffffff +; CHECK-SD-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload +; CHECK-SD-NEXT: fmov d0, x8 +; CHECK-SD-NEXT: mov x8, #5053038781909696511 // =0x461fffffffffffff +; CHECK-SD-NEXT: fcmp d8, d0 +; CHECK-SD-NEXT: fmov d0, x8 +; CHECK-SD-NEXT: mov x8, #-34359738368 // =0xfffffff800000000 +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: csel x8, x8, x1, lt +; CHECK-SD-NEXT: fcmp d8, d0 +; CHECK-SD-NEXT: csel x8, x10, x8, gt +; CHECK-SD-NEXT: csinv x9, x9, xzr, le +; CHECK-SD-NEXT: fcmp d8, d8 +; CHECK-SD-NEXT: csel x0, xzr, x9, vs +; CHECK-SD-NEXT: csel x1, xzr, x8, vs +; CHECK-SD-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_i100_f64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #8] // 8-byte Folded Spill +; CHECK-GI-NEXT: fmov d8, d0 +; CHECK-GI-NEXT: bl __fixdfti +; CHECK-GI-NEXT: mov x8, #-4170333254945079296 // =0xc620000000000000 +; CHECK-GI-NEXT: mov x10, #34359738367 // =0x7ffffffff +; CHECK-GI-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload +; CHECK-GI-NEXT: fmov d0, x8 +; CHECK-GI-NEXT: mov x8, #5053038781909696511 // =0x461fffffffffffff +; CHECK-GI-NEXT: fcmp d8, d0 +; CHECK-GI-NEXT: fmov d0, x8 +; CHECK-GI-NEXT: mov x8, #34359738368 // =0x800000000 +; CHECK-GI-NEXT: csel x9, xzr, x0, lt +; CHECK-GI-NEXT: csel x8, x8, x1, lt +; CHECK-GI-NEXT: fcmp d8, d0 +; CHECK-GI-NEXT: csinv x9, x9, xzr, le +; CHECK-GI-NEXT: csel x8, x10, x8, gt +; CHECK-GI-NEXT: fcmp d8, d8 +; CHECK-GI-NEXT: csel x0, xzr, x9, vs +; CHECK-GI-NEXT: csel x1, xzr, x8, vs +; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload +; CHECK-GI-NEXT: ret %x = call i100 @llvm.fptosi.sat.i100.f64(double %f) ret i100 %x } define i128 @test_signed_i128_f64(double %f) nounwind { -; CHECK-LABEL: test_signed_i128_f64: -; CHECK: // %bb.0: -; CHECK-NEXT: str d8, [sp, #-16]! 
// 8-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #8] // 8-byte Folded Spill -; CHECK-NEXT: fmov d8, d0 -; CHECK-NEXT: bl __fixdfti -; CHECK-NEXT: mov x8, #-4044232465378705408 // =0xc7e0000000000000 -; CHECK-NEXT: mov x10, #9223372036854775807 // =0x7fffffffffffffff -; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload -; CHECK-NEXT: fmov d0, x8 -; CHECK-NEXT: mov x8, #5179139571476070399 // =0x47dfffffffffffff -; CHECK-NEXT: fcmp d8, d0 -; CHECK-NEXT: fmov d0, x8 -; CHECK-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000 -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: csel x8, x8, x1, lt -; CHECK-NEXT: fcmp d8, d0 -; CHECK-NEXT: csel x8, x10, x8, gt -; CHECK-NEXT: csinv x9, x9, xzr, le -; CHECK-NEXT: fcmp d8, d8 -; CHECK-NEXT: csel x0, xzr, x9, vs -; CHECK-NEXT: csel x1, xzr, x8, vs -; CHECK-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_i128_f64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill +; CHECK-SD-NEXT: str x30, [sp, #8] // 8-byte Folded Spill +; CHECK-SD-NEXT: fmov d8, d0 +; CHECK-SD-NEXT: bl __fixdfti +; CHECK-SD-NEXT: mov x8, #-4044232465378705408 // =0xc7e0000000000000 +; CHECK-SD-NEXT: mov x10, #9223372036854775807 // =0x7fffffffffffffff +; CHECK-SD-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload +; CHECK-SD-NEXT: fmov d0, x8 +; CHECK-SD-NEXT: mov x8, #5179139571476070399 // =0x47dfffffffffffff +; CHECK-SD-NEXT: fcmp d8, d0 +; CHECK-SD-NEXT: fmov d0, x8 +; CHECK-SD-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000 +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: csel x8, x8, x1, lt +; CHECK-SD-NEXT: fcmp d8, d0 +; CHECK-SD-NEXT: csel x8, x10, x8, gt +; CHECK-SD-NEXT: csinv x9, x9, xzr, le +; CHECK-SD-NEXT: fcmp d8, d8 +; CHECK-SD-NEXT: csel x0, xzr, x9, vs +; CHECK-SD-NEXT: csel x1, xzr, x8, vs +; CHECK-SD-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_i128_f64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: str d8, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #8] // 8-byte Folded Spill +; CHECK-GI-NEXT: fmov d8, d0 +; CHECK-GI-NEXT: bl __fixdfti +; CHECK-GI-NEXT: mov x8, #-4044232465378705408 // =0xc7e0000000000000 +; CHECK-GI-NEXT: mov x10, #9223372036854775807 // =0x7fffffffffffffff +; CHECK-GI-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload +; CHECK-GI-NEXT: fmov d0, x8 +; CHECK-GI-NEXT: mov x8, #5179139571476070399 // =0x47dfffffffffffff +; CHECK-GI-NEXT: fcmp d8, d0 +; CHECK-GI-NEXT: fmov d0, x8 +; CHECK-GI-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000 +; CHECK-GI-NEXT: csel x9, xzr, x0, lt +; CHECK-GI-NEXT: csel x8, x8, x1, lt +; CHECK-GI-NEXT: fcmp d8, d0 +; CHECK-GI-NEXT: csinv x9, x9, xzr, le +; CHECK-GI-NEXT: csel x8, x10, x8, gt +; CHECK-GI-NEXT: fcmp d8, d8 +; CHECK-GI-NEXT: csel x0, xzr, x9, vs +; CHECK-GI-NEXT: csel x1, xzr, x8, vs +; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload +; CHECK-GI-NEXT: ret %x = call i128 @llvm.fptosi.sat.i128.f64(double %f) ret i128 %x } @@ -372,245 +514,515 @@ declare i100 @llvm.fptosi.sat.i100.f16(half) declare i128 @llvm.fptosi.sat.i128.f16(half) define i1 @test_signed_i1_f16(half %f) nounwind { -; CHECK-CVT-LABEL: test_signed_i1_f16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvt s0, h0 -; CHECK-CVT-NEXT: fcvtzs w8, s0 -; CHECK-CVT-NEXT: ands w8, w8, w8, asr #31 -; CHECK-CVT-NEXT: csinv w8, w8, wzr, ge -; CHECK-CVT-NEXT: and w0, w8, #0x1 -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_i1_f16: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs w8, h0 -; CHECK-FP16-NEXT: ands w8, w8, w8, asr #31 -; CHECK-FP16-NEXT: csinv w8, w8, wzr, ge -; CHECK-FP16-NEXT: and w0, w8, #0x1 -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_i1_f16: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvt s0, h0 +; CHECK-SD-CVT-NEXT: fcvtzs w8, s0 +; CHECK-SD-CVT-NEXT: ands w8, w8, w8, asr #31 +; CHECK-SD-CVT-NEXT: csinv w8, w8, wzr, ge +; CHECK-SD-CVT-NEXT: and w0, w8, #0x1 +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_i1_f16: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs w8, h0 +; CHECK-SD-FP16-NEXT: ands w8, w8, w8, asr #31 +; CHECK-SD-FP16-NEXT: csinv w8, w8, wzr, ge +; CHECK-SD-FP16-NEXT: and w0, w8, #0x1 +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_i1_f16: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: fcvtzs w8, s0 +; CHECK-GI-CVT-NEXT: cmp w8, #0 +; CHECK-GI-CVT-NEXT: csel w8, w8, wzr, lt +; CHECK-GI-CVT-NEXT: cmp w8, #0 +; CHECK-GI-CVT-NEXT: csinv w8, w8, wzr, ge +; CHECK-GI-CVT-NEXT: and w0, w8, #0x1 +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_i1_f16: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs w8, h0 +; CHECK-GI-FP16-NEXT: cmp w8, #0 +; CHECK-GI-FP16-NEXT: csel w8, w8, wzr, lt +; CHECK-GI-FP16-NEXT: cmp w8, #0 +; CHECK-GI-FP16-NEXT: csinv w8, w8, wzr, ge +; CHECK-GI-FP16-NEXT: and w0, w8, #0x1 +; CHECK-GI-FP16-NEXT: ret %x = call i1 @llvm.fptosi.sat.i1.f16(half %f) ret i1 %x } define i8 @test_signed_i8_f16(half %f) nounwind { -; CHECK-CVT-LABEL: test_signed_i8_f16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvt s0, h0 -; CHECK-CVT-NEXT: mov w8, #127 // =0x7f -; CHECK-CVT-NEXT: fcvtzs w9, s0 -; CHECK-CVT-NEXT: cmp w9, #127 -; CHECK-CVT-NEXT: csel w8, w9, w8, lt -; CHECK-CVT-NEXT: mov w9, #-128 // =0xffffff80 -; CHECK-CVT-NEXT: cmn w8, #128 -; CHECK-CVT-NEXT: csel w0, w8, w9, gt -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_i8_f16: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: 
fcvtzs w9, h0 -; CHECK-FP16-NEXT: mov w8, #127 // =0x7f -; CHECK-FP16-NEXT: cmp w9, #127 -; CHECK-FP16-NEXT: csel w8, w9, w8, lt -; CHECK-FP16-NEXT: mov w9, #-128 // =0xffffff80 -; CHECK-FP16-NEXT: cmn w8, #128 -; CHECK-FP16-NEXT: csel w0, w8, w9, gt -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_i8_f16: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvt s0, h0 +; CHECK-SD-CVT-NEXT: mov w8, #127 // =0x7f +; CHECK-SD-CVT-NEXT: fcvtzs w9, s0 +; CHECK-SD-CVT-NEXT: cmp w9, #127 +; CHECK-SD-CVT-NEXT: csel w8, w9, w8, lt +; CHECK-SD-CVT-NEXT: mov w9, #-128 // =0xffffff80 +; CHECK-SD-CVT-NEXT: cmn w8, #128 +; CHECK-SD-CVT-NEXT: csel w0, w8, w9, gt +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_i8_f16: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs w9, h0 +; CHECK-SD-FP16-NEXT: mov w8, #127 // =0x7f +; CHECK-SD-FP16-NEXT: cmp w9, #127 +; CHECK-SD-FP16-NEXT: csel w8, w9, w8, lt +; CHECK-SD-FP16-NEXT: mov w9, #-128 // =0xffffff80 +; CHECK-SD-FP16-NEXT: cmn w8, #128 +; CHECK-SD-FP16-NEXT: csel w0, w8, w9, gt +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_i8_f16: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: mov w8, #127 // =0x7f +; CHECK-GI-CVT-NEXT: fcvtzs w9, s0 +; CHECK-GI-CVT-NEXT: cmp w9, #127 +; CHECK-GI-CVT-NEXT: csel w8, w9, w8, lt +; CHECK-GI-CVT-NEXT: mov w9, #-128 // =0xffffff80 +; CHECK-GI-CVT-NEXT: cmn w8, #128 +; CHECK-GI-CVT-NEXT: csel w0, w8, w9, gt +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_i8_f16: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs w9, h0 +; CHECK-GI-FP16-NEXT: mov w8, #127 // =0x7f +; CHECK-GI-FP16-NEXT: cmp w9, #127 +; CHECK-GI-FP16-NEXT: csel w8, w9, w8, lt +; CHECK-GI-FP16-NEXT: mov w9, #-128 // =0xffffff80 +; CHECK-GI-FP16-NEXT: cmn w8, #128 +; CHECK-GI-FP16-NEXT: csel w0, w8, w9, gt +; CHECK-GI-FP16-NEXT: ret %x = call i8 @llvm.fptosi.sat.i8.f16(half %f) ret i8 %x } define i13 @test_signed_i13_f16(half %f) nounwind { -; CHECK-CVT-LABEL: test_signed_i13_f16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvt s0, h0 -; CHECK-CVT-NEXT: mov w8, #4095 // =0xfff -; CHECK-CVT-NEXT: fcvtzs w9, s0 -; CHECK-CVT-NEXT: cmp w9, #4095 -; CHECK-CVT-NEXT: csel w8, w9, w8, lt -; CHECK-CVT-NEXT: mov w9, #-4096 // =0xfffff000 -; CHECK-CVT-NEXT: cmn w8, #1, lsl #12 // =4096 -; CHECK-CVT-NEXT: csel w0, w8, w9, gt -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_i13_f16: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs w9, h0 -; CHECK-FP16-NEXT: mov w8, #4095 // =0xfff -; CHECK-FP16-NEXT: cmp w9, #4095 -; CHECK-FP16-NEXT: csel w8, w9, w8, lt -; CHECK-FP16-NEXT: mov w9, #-4096 // =0xfffff000 -; CHECK-FP16-NEXT: cmn w8, #1, lsl #12 // =4096 -; CHECK-FP16-NEXT: csel w0, w8, w9, gt -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_i13_f16: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvt s0, h0 +; CHECK-SD-CVT-NEXT: mov w8, #4095 // =0xfff +; CHECK-SD-CVT-NEXT: fcvtzs w9, s0 +; CHECK-SD-CVT-NEXT: cmp w9, #4095 +; CHECK-SD-CVT-NEXT: csel w8, w9, w8, lt +; CHECK-SD-CVT-NEXT: mov w9, #-4096 // =0xfffff000 +; CHECK-SD-CVT-NEXT: cmn w8, #1, lsl #12 // =4096 +; CHECK-SD-CVT-NEXT: csel w0, w8, w9, gt +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_i13_f16: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs w9, h0 +; CHECK-SD-FP16-NEXT: mov w8, #4095 // =0xfff +; CHECK-SD-FP16-NEXT: cmp w9, #4095 +; CHECK-SD-FP16-NEXT: csel w8, w9, w8, lt +; CHECK-SD-FP16-NEXT: mov w9, #-4096 // =0xfffff000 +; CHECK-SD-FP16-NEXT: cmn w8, 
#1, lsl #12 // =4096 +; CHECK-SD-FP16-NEXT: csel w0, w8, w9, gt +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_i13_f16: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: mov w8, #4095 // =0xfff +; CHECK-GI-CVT-NEXT: fcvtzs w9, s0 +; CHECK-GI-CVT-NEXT: cmp w9, #4095 +; CHECK-GI-CVT-NEXT: csel w8, w9, w8, lt +; CHECK-GI-CVT-NEXT: mov w9, #-4096 // =0xfffff000 +; CHECK-GI-CVT-NEXT: cmn w8, #1, lsl #12 // =4096 +; CHECK-GI-CVT-NEXT: csel w0, w8, w9, gt +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_i13_f16: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs w9, h0 +; CHECK-GI-FP16-NEXT: mov w8, #4095 // =0xfff +; CHECK-GI-FP16-NEXT: cmp w9, #4095 +; CHECK-GI-FP16-NEXT: csel w8, w9, w8, lt +; CHECK-GI-FP16-NEXT: mov w9, #-4096 // =0xfffff000 +; CHECK-GI-FP16-NEXT: cmn w8, #1, lsl #12 // =4096 +; CHECK-GI-FP16-NEXT: csel w0, w8, w9, gt +; CHECK-GI-FP16-NEXT: ret %x = call i13 @llvm.fptosi.sat.i13.f16(half %f) ret i13 %x } define i16 @test_signed_i16_f16(half %f) nounwind { -; CHECK-CVT-LABEL: test_signed_i16_f16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvt s0, h0 -; CHECK-CVT-NEXT: mov w9, #32767 // =0x7fff -; CHECK-CVT-NEXT: fcvtzs w8, s0 -; CHECK-CVT-NEXT: cmp w8, w9 -; CHECK-CVT-NEXT: csel w8, w8, w9, lt -; CHECK-CVT-NEXT: mov w9, #-32768 // =0xffff8000 -; CHECK-CVT-NEXT: cmn w8, #8, lsl #12 // =32768 -; CHECK-CVT-NEXT: csel w0, w8, w9, gt -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_i16_f16: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs w8, h0 -; CHECK-FP16-NEXT: mov w9, #32767 // =0x7fff -; CHECK-FP16-NEXT: cmp w8, w9 -; CHECK-FP16-NEXT: csel w8, w8, w9, lt -; CHECK-FP16-NEXT: mov w9, #-32768 // =0xffff8000 -; CHECK-FP16-NEXT: cmn w8, #8, lsl #12 // =32768 -; CHECK-FP16-NEXT: csel w0, w8, w9, gt -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_i16_f16: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvt s0, h0 +; CHECK-SD-CVT-NEXT: mov w9, #32767 // =0x7fff +; CHECK-SD-CVT-NEXT: fcvtzs w8, s0 +; CHECK-SD-CVT-NEXT: cmp w8, w9 +; CHECK-SD-CVT-NEXT: csel w8, w8, w9, lt +; CHECK-SD-CVT-NEXT: mov w9, #-32768 // =0xffff8000 +; CHECK-SD-CVT-NEXT: cmn w8, #8, lsl #12 // =32768 +; CHECK-SD-CVT-NEXT: csel w0, w8, w9, gt +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_i16_f16: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs w8, h0 +; CHECK-SD-FP16-NEXT: mov w9, #32767 // =0x7fff +; CHECK-SD-FP16-NEXT: cmp w8, w9 +; CHECK-SD-FP16-NEXT: csel w8, w8, w9, lt +; CHECK-SD-FP16-NEXT: mov w9, #-32768 // =0xffff8000 +; CHECK-SD-FP16-NEXT: cmn w8, #8, lsl #12 // =32768 +; CHECK-SD-FP16-NEXT: csel w0, w8, w9, gt +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_i16_f16: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: mov w9, #32767 // =0x7fff +; CHECK-GI-CVT-NEXT: fcvtzs w8, s0 +; CHECK-GI-CVT-NEXT: cmp w8, w9 +; CHECK-GI-CVT-NEXT: csel w8, w8, w9, lt +; CHECK-GI-CVT-NEXT: mov w9, #-32768 // =0xffff8000 +; CHECK-GI-CVT-NEXT: cmn w8, #8, lsl #12 // =32768 +; CHECK-GI-CVT-NEXT: csel w0, w8, w9, gt +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_i16_f16: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs w8, h0 +; CHECK-GI-FP16-NEXT: mov w9, #32767 // =0x7fff +; CHECK-GI-FP16-NEXT: cmp w8, w9 +; CHECK-GI-FP16-NEXT: csel w8, w8, w9, lt +; CHECK-GI-FP16-NEXT: mov w9, #-32768 // =0xffff8000 +; CHECK-GI-FP16-NEXT: cmn w8, #8, lsl #12 // =32768 +; CHECK-GI-FP16-NEXT: csel w0, w8, w9, gt +; CHECK-GI-FP16-NEXT: 
ret %x = call i16 @llvm.fptosi.sat.i16.f16(half %f) ret i16 %x } define i19 @test_signed_i19_f16(half %f) nounwind { -; CHECK-CVT-LABEL: test_signed_i19_f16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvt s0, h0 -; CHECK-CVT-NEXT: mov w9, #262143 // =0x3ffff -; CHECK-CVT-NEXT: fcvtzs w8, s0 -; CHECK-CVT-NEXT: cmp w8, w9 -; CHECK-CVT-NEXT: csel w8, w8, w9, lt -; CHECK-CVT-NEXT: mov w9, #-262144 // =0xfffc0000 -; CHECK-CVT-NEXT: cmn w8, #64, lsl #12 // =262144 -; CHECK-CVT-NEXT: csel w0, w8, w9, gt -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_i19_f16: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs w8, h0 -; CHECK-FP16-NEXT: mov w9, #262143 // =0x3ffff -; CHECK-FP16-NEXT: cmp w8, w9 -; CHECK-FP16-NEXT: csel w8, w8, w9, lt -; CHECK-FP16-NEXT: mov w9, #-262144 // =0xfffc0000 -; CHECK-FP16-NEXT: cmn w8, #64, lsl #12 // =262144 -; CHECK-FP16-NEXT: csel w0, w8, w9, gt -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_i19_f16: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvt s0, h0 +; CHECK-SD-CVT-NEXT: mov w9, #262143 // =0x3ffff +; CHECK-SD-CVT-NEXT: fcvtzs w8, s0 +; CHECK-SD-CVT-NEXT: cmp w8, w9 +; CHECK-SD-CVT-NEXT: csel w8, w8, w9, lt +; CHECK-SD-CVT-NEXT: mov w9, #-262144 // =0xfffc0000 +; CHECK-SD-CVT-NEXT: cmn w8, #64, lsl #12 // =262144 +; CHECK-SD-CVT-NEXT: csel w0, w8, w9, gt +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_i19_f16: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs w8, h0 +; CHECK-SD-FP16-NEXT: mov w9, #262143 // =0x3ffff +; CHECK-SD-FP16-NEXT: cmp w8, w9 +; CHECK-SD-FP16-NEXT: csel w8, w8, w9, lt +; CHECK-SD-FP16-NEXT: mov w9, #-262144 // =0xfffc0000 +; CHECK-SD-FP16-NEXT: cmn w8, #64, lsl #12 // =262144 +; CHECK-SD-FP16-NEXT: csel w0, w8, w9, gt +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_i19_f16: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: mov w9, #262143 // =0x3ffff +; CHECK-GI-CVT-NEXT: fcvtzs w8, s0 +; CHECK-GI-CVT-NEXT: cmp w8, w9 +; CHECK-GI-CVT-NEXT: csel w8, w8, w9, lt +; CHECK-GI-CVT-NEXT: mov w9, #-262144 // =0xfffc0000 +; CHECK-GI-CVT-NEXT: cmn w8, #64, lsl #12 // =262144 +; CHECK-GI-CVT-NEXT: csel w0, w8, w9, gt +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_i19_f16: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs w8, h0 +; CHECK-GI-FP16-NEXT: mov w9, #262143 // =0x3ffff +; CHECK-GI-FP16-NEXT: cmp w8, w9 +; CHECK-GI-FP16-NEXT: csel w8, w8, w9, lt +; CHECK-GI-FP16-NEXT: mov w9, #-262144 // =0xfffc0000 +; CHECK-GI-FP16-NEXT: cmn w8, #64, lsl #12 // =262144 +; CHECK-GI-FP16-NEXT: csel w0, w8, w9, gt +; CHECK-GI-FP16-NEXT: ret %x = call i19 @llvm.fptosi.sat.i19.f16(half %f) ret i19 %x } define i32 @test_signed_i32_f16(half %f) nounwind { -; CHECK-CVT-LABEL: test_signed_i32_f16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvt s0, h0 -; CHECK-CVT-NEXT: fcvtzs w0, s0 -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_i32_f16: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs w0, h0 -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_i32_f16: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvt s0, h0 +; CHECK-SD-CVT-NEXT: fcvtzs w0, s0 +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_i32_f16: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs w0, h0 +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_i32_f16: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: fcvtzs w0, s0 +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: 
test_signed_i32_f16: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs w0, h0 +; CHECK-GI-FP16-NEXT: ret %x = call i32 @llvm.fptosi.sat.i32.f16(half %f) ret i32 %x } define i50 @test_signed_i50_f16(half %f) nounwind { -; CHECK-CVT-LABEL: test_signed_i50_f16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvt s0, h0 -; CHECK-CVT-NEXT: mov x9, #562949953421311 // =0x1ffffffffffff -; CHECK-CVT-NEXT: fcvtzs x8, s0 -; CHECK-CVT-NEXT: cmp x8, x9 -; CHECK-CVT-NEXT: csel x8, x8, x9, lt -; CHECK-CVT-NEXT: mov x9, #-562949953421312 // =0xfffe000000000000 -; CHECK-CVT-NEXT: cmp x8, x9 -; CHECK-CVT-NEXT: csel x0, x8, x9, gt -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_i50_f16: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs x8, h0 -; CHECK-FP16-NEXT: mov x9, #562949953421311 // =0x1ffffffffffff -; CHECK-FP16-NEXT: cmp x8, x9 -; CHECK-FP16-NEXT: csel x8, x8, x9, lt -; CHECK-FP16-NEXT: mov x9, #-562949953421312 // =0xfffe000000000000 -; CHECK-FP16-NEXT: cmp x8, x9 -; CHECK-FP16-NEXT: csel x0, x8, x9, gt -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_i50_f16: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvt s0, h0 +; CHECK-SD-CVT-NEXT: mov x9, #562949953421311 // =0x1ffffffffffff +; CHECK-SD-CVT-NEXT: fcvtzs x8, s0 +; CHECK-SD-CVT-NEXT: cmp x8, x9 +; CHECK-SD-CVT-NEXT: csel x8, x8, x9, lt +; CHECK-SD-CVT-NEXT: mov x9, #-562949953421312 // =0xfffe000000000000 +; CHECK-SD-CVT-NEXT: cmp x8, x9 +; CHECK-SD-CVT-NEXT: csel x0, x8, x9, gt +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_i50_f16: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs x8, h0 +; CHECK-SD-FP16-NEXT: mov x9, #562949953421311 // =0x1ffffffffffff +; CHECK-SD-FP16-NEXT: cmp x8, x9 +; CHECK-SD-FP16-NEXT: csel x8, x8, x9, lt +; CHECK-SD-FP16-NEXT: mov x9, #-562949953421312 // =0xfffe000000000000 +; CHECK-SD-FP16-NEXT: cmp x8, x9 +; CHECK-SD-FP16-NEXT: csel x0, x8, x9, gt +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_i50_f16: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: mov x9, #562949953421311 // =0x1ffffffffffff +; CHECK-GI-CVT-NEXT: mov x10, #-562949953421312 // =0xfffe000000000000 +; CHECK-GI-CVT-NEXT: fcvtzs x8, s0 +; CHECK-GI-CVT-NEXT: cmp x8, x9 +; CHECK-GI-CVT-NEXT: csel x8, x8, x9, lt +; CHECK-GI-CVT-NEXT: cmp x8, x10 +; CHECK-GI-CVT-NEXT: csel x0, x8, x10, gt +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_i50_f16: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs x8, h0 +; CHECK-GI-FP16-NEXT: mov x9, #562949953421311 // =0x1ffffffffffff +; CHECK-GI-FP16-NEXT: mov x10, #-562949953421312 // =0xfffe000000000000 +; CHECK-GI-FP16-NEXT: cmp x8, x9 +; CHECK-GI-FP16-NEXT: csel x8, x8, x9, lt +; CHECK-GI-FP16-NEXT: cmp x8, x10 +; CHECK-GI-FP16-NEXT: csel x0, x8, x10, gt +; CHECK-GI-FP16-NEXT: ret %x = call i50 @llvm.fptosi.sat.i50.f16(half %f) ret i50 %x } define i64 @test_signed_i64_f16(half %f) nounwind { -; CHECK-CVT-LABEL: test_signed_i64_f16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvt s0, h0 -; CHECK-CVT-NEXT: fcvtzs x0, s0 -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_i64_f16: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs x0, h0 -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_i64_f16: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvt s0, h0 +; CHECK-SD-CVT-NEXT: fcvtzs x0, s0 +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_i64_f16: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs x0, h0 +; CHECK-SD-FP16-NEXT: ret +; +; 
CHECK-GI-CVT-LABEL: test_signed_i64_f16: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: fcvtzs x0, s0 +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_i64_f16: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs x0, h0 +; CHECK-GI-FP16-NEXT: ret %x = call i64 @llvm.fptosi.sat.i64.f16(half %f) ret i64 %x } define i100 @test_signed_i100_f16(half %f) nounwind { -; CHECK-LABEL: test_signed_i100_f16: -; CHECK: // %bb.0: -; CHECK-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: str x30, [sp, #8] // 8-byte Folded Spill -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: movi v0.2s, #241, lsl #24 -; CHECK-NEXT: mov w8, #1895825407 // =0x70ffffff -; CHECK-NEXT: mov x10, #34359738367 // =0x7ffffffff -; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload -; CHECK-NEXT: fcmp s8, s0 -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov x8, #-34359738368 // =0xfffffff800000000 -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: csel x8, x8, x1, lt -; CHECK-NEXT: fcmp s8, s0 -; CHECK-NEXT: csel x8, x10, x8, gt -; CHECK-NEXT: csinv x9, x9, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: csel x0, xzr, x9, vs -; CHECK-NEXT: csel x1, xzr, x8, vs -; CHECK-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_i100_f16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: str x30, [sp, #8] // 8-byte Folded Spill +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: movi v0.2s, #241, lsl #24 +; CHECK-SD-NEXT: mov w8, #1895825407 // =0x70ffffff +; CHECK-SD-NEXT: mov x10, #34359738367 // =0x7ffffffff +; CHECK-SD-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload +; CHECK-SD-NEXT: fcmp s8, s0 +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov x8, #-34359738368 // =0xfffffff800000000 +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: csel x8, x8, x1, lt +; CHECK-SD-NEXT: fcmp s8, s0 +; CHECK-SD-NEXT: csel x8, x10, x8, gt +; CHECK-SD-NEXT: csinv x9, x9, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: csel x0, xzr, x9, vs +; CHECK-SD-NEXT: csel x1, xzr, x8, vs +; CHECK-SD-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload +; CHECK-SD-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_i100_f16: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: mov x1, xzr +; CHECK-GI-CVT-NEXT: fcvtzs x0, s0 +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_i100_f16: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs x0, h0 +; CHECK-GI-FP16-NEXT: mov x1, xzr +; CHECK-GI-FP16-NEXT: ret %x = call i100 @llvm.fptosi.sat.i100.f16(half %f) ret i100 %x } define i128 @test_signed_i128_f16(half %f) nounwind { -; CHECK-LABEL: test_signed_i128_f16: -; CHECK: // %bb.0: -; CHECK-NEXT: str d8, [sp, #-16]! 
// 8-byte Folded Spill -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: str x30, [sp, #8] // 8-byte Folded Spill -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: movi v0.2s, #255, lsl #24 -; CHECK-NEXT: mov w8, #2130706431 // =0x7effffff -; CHECK-NEXT: mov x10, #9223372036854775807 // =0x7fffffffffffffff -; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload -; CHECK-NEXT: fcmp s8, s0 -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000 -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: csel x8, x8, x1, lt -; CHECK-NEXT: fcmp s8, s0 -; CHECK-NEXT: csel x8, x10, x8, gt -; CHECK-NEXT: csinv x9, x9, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: csel x0, xzr, x9, vs -; CHECK-NEXT: csel x1, xzr, x8, vs -; CHECK-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_i128_f16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: str x30, [sp, #8] // 8-byte Folded Spill +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: movi v0.2s, #255, lsl #24 +; CHECK-SD-NEXT: mov w8, #2130706431 // =0x7effffff +; CHECK-SD-NEXT: mov x10, #9223372036854775807 // =0x7fffffffffffffff +; CHECK-SD-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload +; CHECK-SD-NEXT: fcmp s8, s0 +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000 +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: csel x8, x8, x1, lt +; CHECK-SD-NEXT: fcmp s8, s0 +; CHECK-SD-NEXT: csel x8, x10, x8, gt +; CHECK-SD-NEXT: csinv x9, x9, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: csel x0, xzr, x9, vs +; CHECK-SD-NEXT: csel x1, xzr, x8, vs +; CHECK-SD-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload +; CHECK-SD-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_i128_f16: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: mov x1, xzr +; CHECK-GI-CVT-NEXT: fcvtzs x0, s0 +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_i128_f16: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs x0, h0 +; CHECK-GI-FP16-NEXT: mov x1, xzr +; CHECK-GI-FP16-NEXT: ret %x = call i128 @llvm.fptosi.sat.i128.f16(half %f) ret i128 %x } + +define i32 @test_signed_f128_i32(fp128 %f) { +; CHECK-SD-LABEL: test_signed_f128_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #32 +; CHECK-SD-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 32 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w30, -16 +; CHECK-SD-NEXT: adrp x8, .LCPI30_0 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI30_0] +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixtfsi +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: mov w8, #-2147483648 // =0x80000000 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel w19, w8, w0, lt +; CHECK-SD-NEXT: adrp x8, .LCPI30_1 +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI30_1] +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w8, #2147483647 // =0x7fffffff +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w19, w8, w19, gt +; CHECK-SD-NEXT: mov v1.16b, v0.16b +; CHECK-SD-NEXT: bl __unordtf2 +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w0, wzr, w19, ne +; CHECK-SD-NEXT: ldp x30, 
x19, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: add sp, sp, #32 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_f128_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #64 +; CHECK-GI-NEXT: stp d9, d8, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w30, -16 +; CHECK-GI-NEXT: .cfi_offset b8, -24 +; CHECK-GI-NEXT: .cfi_offset b9, -32 +; CHECK-GI-NEXT: adrp x8, .LCPI30_1 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI30_1] +; CHECK-GI-NEXT: stp q1, q0, [sp] // 32-byte Folded Spill +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q3, q2, [sp] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v2.d[1] +; CHECK-GI-NEXT: mov d1, v3.d[1] +; CHECK-GI-NEXT: fcsel d8, d2, d3, lt +; CHECK-GI-NEXT: fmov x8, d8 +; CHECK-GI-NEXT: fcsel d9, d0, d1, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d9 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: adrp x8, .LCPI30_0 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI30_0] +; CHECK-GI-NEXT: str q1, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: fcsel d1, d8, d1, gt +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: fcsel d2, d9, d0, gt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d2 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixtfsi +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w19, w0 +; CHECK-GI-NEXT: mov v1.16b, v0.16b +; CHECK-GI-NEXT: bl __unordtf2 +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: ldp d9, d8, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel w0, wzr, w19, ne +; CHECK-GI-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: ret + %x = call i32 @llvm.fptosi.sat.i32.f128(fp128 %f) + ret i32 %x +} diff --git a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll index 4626fd7..ed78149 100644 --- a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll +++ b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll @@ -1,6 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT -; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16 +; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-CVT +; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16 +; RUN: llc < %s -mtriple=aarch64 -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-CVT +; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16 + +; CHECK-GI: warning: Instruction selection used fallback path for test_signed_v4f32_v4i50 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for test_signed_v4f16_v4i50 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for test_signed_v8f16_v8i19 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for test_signed_v8f16_v8i50 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for 
test_signed_v8f16_v8i128 ; ; Float to signed 32-bit -- Vector size variation @@ -16,10 +24,17 @@ declare <7 x i32> @llvm.fptosi.sat.v7f32.v7i32 (<7 x float>) declare <8 x i32> @llvm.fptosi.sat.v8f32.v8i32 (<8 x float>) define <1 x i32> @test_signed_v1f32_v1i32(<1 x float> %f) { -; CHECK-LABEL: test_signed_v1f32_v1i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzs v0.2s, v0.2s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v1f32_v1i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzs v0.2s, v0.2s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v1f32_v1i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs w8, s0 +; CHECK-GI-NEXT: mov v0.s[0], w8 +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-NEXT: ret %x = call <1 x i32> @llvm.fptosi.sat.v1f32.v1i32(<1 x float> %f) ret <1 x i32> %x } @@ -52,79 +67,157 @@ define <4 x i32> @test_signed_v4f32_v4i32(<4 x float> %f) { } define <5 x i32> @test_signed_v5f32_v5i32(<5 x float> %f) { -; CHECK-LABEL: test_signed_v5f32_v5i32: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 -; CHECK-NEXT: // kill: def $s1 killed $s1 def $q1 -; CHECK-NEXT: // kill: def $s2 killed $s2 def $q2 -; CHECK-NEXT: // kill: def $s3 killed $s3 def $q3 -; CHECK-NEXT: // kill: def $s4 killed $s4 def $q4 -; CHECK-NEXT: mov v0.s[1], v1.s[0] -; CHECK-NEXT: fcvtzs v4.4s, v4.4s -; CHECK-NEXT: mov v0.s[2], v2.s[0] -; CHECK-NEXT: fmov w4, s4 -; CHECK-NEXT: mov v0.s[3], v3.s[0] -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: mov w1, v0.s[1] -; CHECK-NEXT: mov w2, v0.s[2] -; CHECK-NEXT: mov w3, v0.s[3] -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v5f32_v5i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-SD-NEXT: // kill: def $s1 killed $s1 def $q1 +; CHECK-SD-NEXT: // kill: def $s2 killed $s2 def $q2 +; CHECK-SD-NEXT: // kill: def $s3 killed $s3 def $q3 +; CHECK-SD-NEXT: // kill: def $s4 killed $s4 def $q4 +; CHECK-SD-NEXT: mov v0.s[1], v1.s[0] +; CHECK-SD-NEXT: fcvtzs v4.4s, v4.4s +; CHECK-SD-NEXT: mov v0.s[2], v2.s[0] +; CHECK-SD-NEXT: fmov w4, s4 +; CHECK-SD-NEXT: mov v0.s[3], v3.s[0] +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: mov w1, v0.s[1] +; CHECK-SD-NEXT: mov w2, v0.s[2] +; CHECK-SD-NEXT: mov w3, v0.s[3] +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v5f32_v5i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-GI-NEXT: // kill: def $s1 killed $s1 def $q1 +; CHECK-GI-NEXT: // kill: def $s2 killed $s2 def $q2 +; CHECK-GI-NEXT: // kill: def $s3 killed $s3 def $q3 +; CHECK-GI-NEXT: // kill: def $s4 killed $s4 def $q4 +; CHECK-GI-NEXT: mov v0.s[1], v1.s[0] +; CHECK-GI-NEXT: fcvtzs v1.4s, v4.4s +; CHECK-GI-NEXT: mov v0.s[2], v2.s[0] +; CHECK-GI-NEXT: fmov w4, s1 +; CHECK-GI-NEXT: mov v0.s[3], v3.s[0] +; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-NEXT: mov s2, v0.s[1] +; CHECK-GI-NEXT: mov s3, v0.s[2] +; CHECK-GI-NEXT: mov s4, v0.s[3] +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: fmov w1, s2 +; CHECK-GI-NEXT: fmov w2, s3 +; CHECK-GI-NEXT: fmov w3, s4 +; CHECK-GI-NEXT: ret %x = call <5 x i32> @llvm.fptosi.sat.v5f32.v5i32(<5 x float> %f) ret <5 x i32> %x } define <6 x i32> @test_signed_v6f32_v6i32(<6 x float> %f) { -; CHECK-LABEL: test_signed_v6f32_v6i32: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 -; CHECK-NEXT: // kill: def $s1 killed $s1 def $q1 -; CHECK-NEXT: // kill: def $s2 killed $s2 def $q2 -; CHECK-NEXT: // kill: def $s4 killed $s4 def $q4 
-; CHECK-NEXT: // kill: def $s5 killed $s5 def $q5 -; CHECK-NEXT: // kill: def $s3 killed $s3 def $q3 -; CHECK-NEXT: mov v0.s[1], v1.s[0] -; CHECK-NEXT: mov v4.s[1], v5.s[0] -; CHECK-NEXT: mov v0.s[2], v2.s[0] -; CHECK-NEXT: fcvtzs v1.4s, v4.4s -; CHECK-NEXT: mov v0.s[3], v3.s[0] -; CHECK-NEXT: mov w5, v1.s[1] -; CHECK-NEXT: fmov w4, s1 -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: mov w1, v0.s[1] -; CHECK-NEXT: mov w2, v0.s[2] -; CHECK-NEXT: mov w3, v0.s[3] -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v6f32_v6i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-SD-NEXT: // kill: def $s1 killed $s1 def $q1 +; CHECK-SD-NEXT: // kill: def $s2 killed $s2 def $q2 +; CHECK-SD-NEXT: // kill: def $s4 killed $s4 def $q4 +; CHECK-SD-NEXT: // kill: def $s5 killed $s5 def $q5 +; CHECK-SD-NEXT: // kill: def $s3 killed $s3 def $q3 +; CHECK-SD-NEXT: mov v0.s[1], v1.s[0] +; CHECK-SD-NEXT: mov v4.s[1], v5.s[0] +; CHECK-SD-NEXT: mov v0.s[2], v2.s[0] +; CHECK-SD-NEXT: fcvtzs v1.4s, v4.4s +; CHECK-SD-NEXT: mov v0.s[3], v3.s[0] +; CHECK-SD-NEXT: mov w5, v1.s[1] +; CHECK-SD-NEXT: fmov w4, s1 +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: mov w1, v0.s[1] +; CHECK-SD-NEXT: mov w2, v0.s[2] +; CHECK-SD-NEXT: mov w3, v0.s[3] +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v6f32_v6i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-GI-NEXT: // kill: def $s1 killed $s1 def $q1 +; CHECK-GI-NEXT: // kill: def $s2 killed $s2 def $q2 +; CHECK-GI-NEXT: // kill: def $s4 killed $s4 def $q4 +; CHECK-GI-NEXT: // kill: def $s3 killed $s3 def $q3 +; CHECK-GI-NEXT: // kill: def $s5 killed $s5 def $q5 +; CHECK-GI-NEXT: mov v0.s[1], v1.s[0] +; CHECK-GI-NEXT: mov v4.s[1], v5.s[0] +; CHECK-GI-NEXT: mov v0.s[2], v2.s[0] +; CHECK-GI-NEXT: fcvtzs v1.4s, v4.4s +; CHECK-GI-NEXT: mov v0.s[3], v3.s[0] +; CHECK-GI-NEXT: mov s4, v1.s[1] +; CHECK-GI-NEXT: fmov w4, s1 +; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-NEXT: fmov w5, s4 +; CHECK-GI-NEXT: mov s2, v0.s[1] +; CHECK-GI-NEXT: mov s3, v0.s[2] +; CHECK-GI-NEXT: mov s5, v0.s[3] +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: fmov w1, s2 +; CHECK-GI-NEXT: fmov w2, s3 +; CHECK-GI-NEXT: fmov w3, s5 +; CHECK-GI-NEXT: ret %x = call <6 x i32> @llvm.fptosi.sat.v6f32.v6i32(<6 x float> %f) ret <6 x i32> %x } define <7 x i32> @test_signed_v7f32_v7i32(<7 x float> %f) { -; CHECK-LABEL: test_signed_v7f32_v7i32: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 -; CHECK-NEXT: // kill: def $s1 killed $s1 def $q1 -; CHECK-NEXT: // kill: def $s4 killed $s4 def $q4 -; CHECK-NEXT: // kill: def $s5 killed $s5 def $q5 -; CHECK-NEXT: // kill: def $s2 killed $s2 def $q2 -; CHECK-NEXT: // kill: def $s6 killed $s6 def $q6 -; CHECK-NEXT: // kill: def $s3 killed $s3 def $q3 -; CHECK-NEXT: mov v0.s[1], v1.s[0] -; CHECK-NEXT: mov v4.s[1], v5.s[0] -; CHECK-NEXT: mov v0.s[2], v2.s[0] -; CHECK-NEXT: mov v4.s[2], v6.s[0] -; CHECK-NEXT: mov v0.s[3], v3.s[0] -; CHECK-NEXT: fcvtzs v1.4s, v4.4s -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: mov w5, v1.s[1] -; CHECK-NEXT: mov w6, v1.s[2] -; CHECK-NEXT: fmov w4, s1 -; CHECK-NEXT: mov w1, v0.s[1] -; CHECK-NEXT: mov w2, v0.s[2] -; CHECK-NEXT: mov w3, v0.s[3] -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v7f32_v7i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-SD-NEXT: // kill: def $s1 killed $s1 def $q1 +; CHECK-SD-NEXT: // 
kill: def $s4 killed $s4 def $q4 +; CHECK-SD-NEXT: // kill: def $s5 killed $s5 def $q5 +; CHECK-SD-NEXT: // kill: def $s2 killed $s2 def $q2 +; CHECK-SD-NEXT: // kill: def $s6 killed $s6 def $q6 +; CHECK-SD-NEXT: // kill: def $s3 killed $s3 def $q3 +; CHECK-SD-NEXT: mov v0.s[1], v1.s[0] +; CHECK-SD-NEXT: mov v4.s[1], v5.s[0] +; CHECK-SD-NEXT: mov v0.s[2], v2.s[0] +; CHECK-SD-NEXT: mov v4.s[2], v6.s[0] +; CHECK-SD-NEXT: mov v0.s[3], v3.s[0] +; CHECK-SD-NEXT: fcvtzs v1.4s, v4.4s +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: mov w5, v1.s[1] +; CHECK-SD-NEXT: mov w6, v1.s[2] +; CHECK-SD-NEXT: fmov w4, s1 +; CHECK-SD-NEXT: mov w1, v0.s[1] +; CHECK-SD-NEXT: mov w2, v0.s[2] +; CHECK-SD-NEXT: mov w3, v0.s[3] +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v7f32_v7i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-GI-NEXT: // kill: def $s1 killed $s1 def $q1 +; CHECK-GI-NEXT: // kill: def $s4 killed $s4 def $q4 +; CHECK-GI-NEXT: // kill: def $s2 killed $s2 def $q2 +; CHECK-GI-NEXT: // kill: def $s5 killed $s5 def $q5 +; CHECK-GI-NEXT: // kill: def $s3 killed $s3 def $q3 +; CHECK-GI-NEXT: // kill: def $s6 killed $s6 def $q6 +; CHECK-GI-NEXT: mov v0.s[1], v1.s[0] +; CHECK-GI-NEXT: mov v4.s[1], v5.s[0] +; CHECK-GI-NEXT: mov v0.s[2], v2.s[0] +; CHECK-GI-NEXT: mov v4.s[2], v6.s[0] +; CHECK-GI-NEXT: mov v0.s[3], v3.s[0] +; CHECK-GI-NEXT: fcvtzs v1.4s, v4.4s +; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-NEXT: mov s5, v1.s[1] +; CHECK-GI-NEXT: mov s6, v1.s[2] +; CHECK-GI-NEXT: fmov w4, s1 +; CHECK-GI-NEXT: mov s2, v0.s[1] +; CHECK-GI-NEXT: mov s3, v0.s[2] +; CHECK-GI-NEXT: mov s4, v0.s[3] +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: fmov w5, s5 +; CHECK-GI-NEXT: fmov w6, s6 +; CHECK-GI-NEXT: fmov w1, s2 +; CHECK-GI-NEXT: fmov w2, s3 +; CHECK-GI-NEXT: fmov w3, s4 +; CHECK-GI-NEXT: ret %x = call <7 x i32> @llvm.fptosi.sat.v7f32.v7i32(<7 x float> %f) ret <7 x i32> %x } @@ -151,86 +244,238 @@ declare <5 x i32> @llvm.fptosi.sat.v5f64.v5i32 (<5 x double>) declare <6 x i32> @llvm.fptosi.sat.v6f64.v6i32 (<6 x double>) define <1 x i32> @test_signed_v1f64_v1i32(<1 x double> %f) { -; CHECK-LABEL: test_signed_v1f64_v1i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzs w8, d0 -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v1f64_v1i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzs w8, d0 +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v1f64_v1i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs w8, d0 +; CHECK-GI-NEXT: mov v0.s[0], w8 +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-NEXT: ret %x = call <1 x i32> @llvm.fptosi.sat.v1f64.v1i32(<1 x double> %f) ret <1 x i32> %x } define <2 x i32> @test_signed_v2f64_v2i32(<2 x double> %f) { -; CHECK-LABEL: test_signed_v2f64_v2i32: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: fcvtzs w8, d0 -; CHECK-NEXT: fcvtzs w9, d1 -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w9 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f64_v2i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: fcvtzs w8, d0 +; CHECK-SD-NEXT: fcvtzs w9, d1 +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w9 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f64_v2i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-NEXT: adrp 
x8, .LCPI9_1 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI9_1] +; CHECK-GI-NEXT: adrp x8, .LCPI9_0 +; CHECK-GI-NEXT: cmgt v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI9_0] +; CHECK-GI-NEXT: cmgt v2.2d, v0.2d, v1.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: ret %x = call <2 x i32> @llvm.fptosi.sat.v2f64.v2i32(<2 x double> %f) ret <2 x i32> %x } define <3 x i32> @test_signed_v3f64_v3i32(<3 x double> %f) { -; CHECK-LABEL: test_signed_v3f64_v3i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzs w8, d0 -; CHECK-NEXT: fcvtzs w9, d1 -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: fcvtzs w8, d2 -; CHECK-NEXT: mov v0.s[1], w9 -; CHECK-NEXT: mov v0.s[2], w8 -; CHECK-NEXT: fcvtzs w8, d0 -; CHECK-NEXT: mov v0.s[3], w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v3f64_v3i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzs w8, d0 +; CHECK-SD-NEXT: fcvtzs w9, d1 +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: fcvtzs w8, d2 +; CHECK-SD-NEXT: mov v0.s[1], w9 +; CHECK-SD-NEXT: mov v0.s[2], w8 +; CHECK-SD-NEXT: fcvtzs w8, d0 +; CHECK-SD-NEXT: mov v0.s[3], w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v3f64_v3i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1 +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: adrp x8, .LCPI10_1 +; CHECK-GI-NEXT: mov v0.d[1], v1.d[0] +; CHECK-GI-NEXT: fcvtzs v1.2d, v2.2d +; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI10_1] +; CHECK-GI-NEXT: adrp x8, .LCPI10_0 +; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-NEXT: cmgt v4.2d, v2.2d, v1.2d +; CHECK-GI-NEXT: bif v1.16b, v2.16b, v4.16b +; CHECK-GI-NEXT: cmgt v3.2d, v2.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v2.16b, v3.16b +; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI10_0] +; CHECK-GI-NEXT: cmgt v4.2d, v1.2d, v2.2d +; CHECK-GI-NEXT: cmgt v3.2d, v0.2d, v2.2d +; CHECK-GI-NEXT: bif v1.16b, v2.16b, v4.16b +; CHECK-GI-NEXT: bif v0.16b, v2.16b, v3.16b +; CHECK-GI-NEXT: uzp1 v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: ret %x = call <3 x i32> @llvm.fptosi.sat.v3f64.v3i32(<3 x double> %f) ret <3 x i32> %x } define <4 x i32> @test_signed_v4f64_v4i32(<4 x double> %f) { -; CHECK-LABEL: test_signed_v4f64_v4i32: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d2, v0.d[1] -; CHECK-NEXT: fcvtzs w8, d0 -; CHECK-NEXT: fcvtzs w9, d2 -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: fcvtzs w8, d1 -; CHECK-NEXT: mov d1, v1.d[1] -; CHECK-NEXT: mov v0.s[1], w9 -; CHECK-NEXT: mov v0.s[2], w8 -; CHECK-NEXT: fcvtzs w8, d1 -; CHECK-NEXT: mov v0.s[3], w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v4f64_v4i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d2, v0.d[1] +; CHECK-SD-NEXT: fcvtzs w8, d0 +; CHECK-SD-NEXT: fcvtzs w9, d2 +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: fcvtzs w8, d1 +; CHECK-SD-NEXT: mov d1, v1.d[1] +; CHECK-SD-NEXT: mov v0.s[1], w9 +; CHECK-SD-NEXT: mov v0.s[2], w8 +; CHECK-SD-NEXT: fcvtzs w8, d1 +; CHECK-SD-NEXT: mov v0.s[3], w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v4f64_v4i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-NEXT: fcvtzs v1.2d, v1.2d +; CHECK-GI-NEXT: adrp x8, .LCPI11_1 +; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI11_1] +; CHECK-GI-NEXT: adrp x8, .LCPI11_0 +; CHECK-GI-NEXT: cmgt v3.2d, v2.2d, v0.2d +; CHECK-GI-NEXT: cmgt v4.2d, v2.2d, v1.2d +; CHECK-GI-NEXT: bif v0.16b, v2.16b, v3.16b +; CHECK-GI-NEXT: bif v1.16b, v2.16b, v4.16b +; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI11_0] 
+; CHECK-GI-NEXT: cmgt v3.2d, v0.2d, v2.2d +; CHECK-GI-NEXT: cmgt v4.2d, v1.2d, v2.2d +; CHECK-GI-NEXT: bif v0.16b, v2.16b, v3.16b +; CHECK-GI-NEXT: bif v1.16b, v2.16b, v4.16b +; CHECK-GI-NEXT: uzp1 v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: ret %x = call <4 x i32> @llvm.fptosi.sat.v4f64.v4i32(<4 x double> %f) ret <4 x i32> %x } define <5 x i32> @test_signed_v5f64_v5i32(<5 x double> %f) { -; CHECK-LABEL: test_signed_v5f64_v5i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzs w0, d0 -; CHECK-NEXT: fcvtzs w1, d1 -; CHECK-NEXT: fcvtzs w2, d2 -; CHECK-NEXT: fcvtzs w3, d3 -; CHECK-NEXT: fcvtzs w4, d4 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v5f64_v5i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzs w0, d0 +; CHECK-SD-NEXT: fcvtzs w1, d1 +; CHECK-SD-NEXT: fcvtzs w2, d2 +; CHECK-SD-NEXT: fcvtzs w3, d3 +; CHECK-SD-NEXT: fcvtzs w4, d4 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v5f64_v5i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1 +; CHECK-GI-NEXT: // kill: def $d3 killed $d3 def $q3 +; CHECK-GI-NEXT: adrp x8, .LCPI12_1 +; CHECK-GI-NEXT: // kill: def $d4 killed $d4 def $q4 +; CHECK-GI-NEXT: mov v0.d[1], v1.d[0] +; CHECK-GI-NEXT: mov v2.d[1], v3.d[0] +; CHECK-GI-NEXT: fcvtzs v3.2d, v4.2d +; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-NEXT: fcvtzs v1.2d, v2.2d +; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI12_1] +; CHECK-GI-NEXT: adrp x8, .LCPI12_0 +; CHECK-GI-NEXT: cmgt v4.2d, v2.2d, v0.2d +; CHECK-GI-NEXT: cmgt v5.2d, v2.2d, v1.2d +; CHECK-GI-NEXT: bif v0.16b, v2.16b, v4.16b +; CHECK-GI-NEXT: bif v1.16b, v2.16b, v5.16b +; CHECK-GI-NEXT: cmgt v4.2d, v2.2d, v3.2d +; CHECK-GI-NEXT: ldr q5, [x8, :lo12:.LCPI12_0] +; CHECK-GI-NEXT: bit v2.16b, v3.16b, v4.16b +; CHECK-GI-NEXT: cmgt v3.2d, v0.2d, v5.2d +; CHECK-GI-NEXT: cmgt v4.2d, v1.2d, v5.2d +; CHECK-GI-NEXT: bif v0.16b, v5.16b, v3.16b +; CHECK-GI-NEXT: bif v1.16b, v5.16b, v4.16b +; CHECK-GI-NEXT: cmgt v3.2d, v2.2d, v5.2d +; CHECK-GI-NEXT: bif v2.16b, v5.16b, v3.16b +; CHECK-GI-NEXT: mov d3, v0.d[1] +; CHECK-GI-NEXT: mov d4, v1.d[1] +; CHECK-GI-NEXT: fmov x0, d0 +; CHECK-GI-NEXT: fmov x2, d1 +; CHECK-GI-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-GI-NEXT: // kill: def $w2 killed $w2 killed $x2 +; CHECK-GI-NEXT: fmov x4, d2 +; CHECK-GI-NEXT: fmov x1, d3 +; CHECK-GI-NEXT: fmov x3, d4 +; CHECK-GI-NEXT: // kill: def $w4 killed $w4 killed $x4 +; CHECK-GI-NEXT: // kill: def $w1 killed $w1 killed $x1 +; CHECK-GI-NEXT: // kill: def $w3 killed $w3 killed $x3 +; CHECK-GI-NEXT: ret %x = call <5 x i32> @llvm.fptosi.sat.v5f64.v5i32(<5 x double> %f) ret <5 x i32> %x } define <6 x i32> @test_signed_v6f64_v6i32(<6 x double> %f) { -; CHECK-LABEL: test_signed_v6f64_v6i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzs w0, d0 -; CHECK-NEXT: fcvtzs w1, d1 -; CHECK-NEXT: fcvtzs w2, d2 -; CHECK-NEXT: fcvtzs w3, d3 -; CHECK-NEXT: fcvtzs w4, d4 -; CHECK-NEXT: fcvtzs w5, d5 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v6f64_v6i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzs w0, d0 +; CHECK-SD-NEXT: fcvtzs w1, d1 +; CHECK-SD-NEXT: fcvtzs w2, d2 +; CHECK-SD-NEXT: fcvtzs w3, d3 +; CHECK-SD-NEXT: fcvtzs w4, d4 +; CHECK-SD-NEXT: fcvtzs w5, d5 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v6f64_v6i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: // kill: def $d4 killed $d4 def $q4 +; CHECK-GI-NEXT: // 
kill: def $d1 killed $d1 def $q1 +; CHECK-GI-NEXT: // kill: def $d3 killed $d3 def $q3 +; CHECK-GI-NEXT: // kill: def $d5 killed $d5 def $q5 +; CHECK-GI-NEXT: adrp x8, .LCPI13_1 +; CHECK-GI-NEXT: mov v0.d[1], v1.d[0] +; CHECK-GI-NEXT: mov v2.d[1], v3.d[0] +; CHECK-GI-NEXT: mov v4.d[1], v5.d[0] +; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI13_1] +; CHECK-GI-NEXT: adrp x8, .LCPI13_0 +; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-NEXT: fcvtzs v1.2d, v2.2d +; CHECK-GI-NEXT: fcvtzs v2.2d, v4.2d +; CHECK-GI-NEXT: cmgt v4.2d, v3.2d, v0.2d +; CHECK-GI-NEXT: cmgt v5.2d, v3.2d, v1.2d +; CHECK-GI-NEXT: cmgt v6.2d, v3.2d, v2.2d +; CHECK-GI-NEXT: bif v0.16b, v3.16b, v4.16b +; CHECK-GI-NEXT: bif v1.16b, v3.16b, v5.16b +; CHECK-GI-NEXT: bif v2.16b, v3.16b, v6.16b +; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI13_0] +; CHECK-GI-NEXT: cmgt v4.2d, v0.2d, v3.2d +; CHECK-GI-NEXT: cmgt v5.2d, v1.2d, v3.2d +; CHECK-GI-NEXT: cmgt v6.2d, v2.2d, v3.2d +; CHECK-GI-NEXT: bif v0.16b, v3.16b, v4.16b +; CHECK-GI-NEXT: bif v1.16b, v3.16b, v5.16b +; CHECK-GI-NEXT: bif v2.16b, v3.16b, v6.16b +; CHECK-GI-NEXT: mov d3, v0.d[1] +; CHECK-GI-NEXT: mov d4, v1.d[1] +; CHECK-GI-NEXT: mov d5, v2.d[1] +; CHECK-GI-NEXT: fmov x0, d0 +; CHECK-GI-NEXT: fmov x2, d1 +; CHECK-GI-NEXT: fmov x4, d2 +; CHECK-GI-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-GI-NEXT: // kill: def $w2 killed $w2 killed $x2 +; CHECK-GI-NEXT: // kill: def $w4 killed $w4 killed $x4 +; CHECK-GI-NEXT: fmov x1, d3 +; CHECK-GI-NEXT: fmov x3, d4 +; CHECK-GI-NEXT: fmov x5, d5 +; CHECK-GI-NEXT: // kill: def $w1 killed $w1 killed $x1 +; CHECK-GI-NEXT: // kill: def $w3 killed $w3 killed $x3 +; CHECK-GI-NEXT: // kill: def $w5 killed $w5 killed $x5 +; CHECK-GI-NEXT: ret %x = call <6 x i32> @llvm.fptosi.sat.v6f64.v6i32(<6 x double> %f) ret <6 x i32> %x } @@ -245,308 +490,727 @@ declare <3 x i32> @llvm.fptosi.sat.v3f128.v3i32 (<3 x fp128>) declare <4 x i32> @llvm.fptosi.sat.v4f128.v4i32 (<4 x fp128>) define <1 x i32> @test_signed_v1f128_v1i32(<1 x fp128> %f) { -; CHECK-LABEL: test_signed_v1f128_v1i32: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w30, -16 -; CHECK-NEXT: adrp x8, .LCPI14_0 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI14_0] -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixtfsi -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: mov w8, #-2147483648 // =0x80000000 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: csel w19, w8, w0, lt -; CHECK-NEXT: adrp x8, .LCPI14_1 -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI14_1] -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w8, #2147483647 // =0x7fffffff -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w19, w8, w19, gt -; CHECK-NEXT: mov v1.16b, v0.16b -; CHECK-NEXT: bl __unordtf2 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w8, wzr, w19, ne -; CHECK-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: add sp, sp, #32 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v1f128_v1i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #32 +; CHECK-SD-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 32 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w30, -16 +; CHECK-SD-NEXT: adrp 
x8, .LCPI14_0 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI14_0] +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixtfsi +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: mov w8, #-2147483648 // =0x80000000 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel w19, w8, w0, lt +; CHECK-SD-NEXT: adrp x8, .LCPI14_1 +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI14_1] +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w8, #2147483647 // =0x7fffffff +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w19, w8, w19, gt +; CHECK-SD-NEXT: mov v1.16b, v0.16b +; CHECK-SD-NEXT: bl __unordtf2 +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w8, wzr, w19, ne +; CHECK-SD-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: add sp, sp, #32 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v1f128_v1i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #64 +; CHECK-GI-NEXT: stp d9, d8, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w30, -16 +; CHECK-GI-NEXT: .cfi_offset b8, -24 +; CHECK-GI-NEXT: .cfi_offset b9, -32 +; CHECK-GI-NEXT: adrp x8, .LCPI14_1 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI14_1] +; CHECK-GI-NEXT: stp q1, q0, [sp] // 32-byte Folded Spill +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q3, q2, [sp] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v2.d[1] +; CHECK-GI-NEXT: mov d1, v3.d[1] +; CHECK-GI-NEXT: fcsel d8, d2, d3, lt +; CHECK-GI-NEXT: fmov x8, d8 +; CHECK-GI-NEXT: fcsel d9, d0, d1, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d9 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: adrp x8, .LCPI14_0 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI14_0] +; CHECK-GI-NEXT: str q1, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: fcsel d1, d8, d1, gt +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: fcsel d2, d9, d0, gt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d2 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixtfsi +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w19, w0 +; CHECK-GI-NEXT: mov v1.16b, v0.16b +; CHECK-GI-NEXT: bl __unordtf2 +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: ldp d9, d8, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel w8, wzr, w19, ne +; CHECK-GI-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[0], w8 +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: ret %x = call <1 x i32> @llvm.fptosi.sat.v1f128.v1i32(<1 x fp128> %f) ret <1 x i32> %x } define <2 x i32> @test_signed_v2f128_v2i32(<2 x fp128> %f) { -; CHECK-LABEL: test_signed_v2f128_v2i32: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #112 -; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 112 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, 
-16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w30, -48 -; CHECK-NEXT: mov v2.16b, v1.16b -; CHECK-NEXT: stp q1, q0, [sp, #32] // 32-byte Folded Spill -; CHECK-NEXT: adrp x8, .LCPI15_0 -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI15_0] -; CHECK-NEXT: mov v0.16b, v2.16b -; CHECK-NEXT: str q1, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixtfsi -; CHECK-NEXT: adrp x8, .LCPI15_1 -; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI15_1] -; CHECK-NEXT: mov w20, #-2147483648 // =0x80000000 -; CHECK-NEXT: csel w19, w20, w0, lt -; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: mov w21, #2147483647 // =0x7fffffff -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w19, w21, w19, gt -; CHECK-NEXT: mov v1.16b, v0.16b -; CHECK-NEXT: bl __unordtf2 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w22, wzr, w19, ne -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixtfsi -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: csel w19, w20, w0, lt -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w19, w21, w19, gt -; CHECK-NEXT: mov v1.16b, v0.16b -; CHECK-NEXT: bl __unordtf2 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload -; CHECK-NEXT: csel w8, wzr, w19, ne -; CHECK-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w22 -; CHECK-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: add sp, sp, #112 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f128_v2i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #112 +; CHECK-SD-NEXT: str x30, [sp, #64] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 112 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w22, -32 +; CHECK-SD-NEXT: .cfi_offset w30, -48 +; CHECK-SD-NEXT: mov v2.16b, v1.16b +; CHECK-SD-NEXT: stp q1, q0, [sp, #32] // 32-byte Folded Spill +; CHECK-SD-NEXT: adrp x8, .LCPI15_0 +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI15_0] +; CHECK-SD-NEXT: mov v0.16b, v2.16b +; CHECK-SD-NEXT: str q1, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixtfsi +; CHECK-SD-NEXT: adrp x8, .LCPI15_1 +; CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI15_1] +; CHECK-SD-NEXT: mov w20, #-2147483648 // =0x80000000 +; CHECK-SD-NEXT: csel w19, w20, w0, lt +; CHECK-SD-NEXT: str q1, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: ldr q0, [sp, 
#32] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w21, #2147483647 // =0x7fffffff +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w19, w21, w19, gt +; CHECK-SD-NEXT: mov v1.16b, v0.16b +; CHECK-SD-NEXT: bl __unordtf2 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w22, wzr, w19, ne +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixtfsi +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: csel w19, w20, w0, lt +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w19, w21, w19, gt +; CHECK-SD-NEXT: mov v1.16b, v0.16b +; CHECK-SD-NEXT: bl __unordtf2 +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload +; CHECK-SD-NEXT: csel w8, wzr, w19, ne +; CHECK-SD-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w22 +; CHECK-SD-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: add sp, sp, #112 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f128_v2i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #128 +; CHECK-GI-NEXT: stp d11, d10, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #96] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 128 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w30, -32 +; CHECK-GI-NEXT: .cfi_offset b8, -40 +; CHECK-GI-NEXT: .cfi_offset b9, -48 +; CHECK-GI-NEXT: .cfi_offset b10, -56 +; CHECK-GI-NEXT: .cfi_offset b11, -64 +; CHECK-GI-NEXT: adrp x8, .LCPI15_1 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI15_1] +; CHECK-GI-NEXT: stp q2, q1, [sp, #32] // 32-byte Folded Spill +; CHECK-GI-NEXT: mov v1.16b, v2.16b +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldr q2, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v2.d[1] +; CHECK-GI-NEXT: mov d8, v1.d[1] +; CHECK-GI-NEXT: fcsel d9, d2, d1, lt +; CHECK-GI-NEXT: fmov x8, d9 +; CHECK-GI-NEXT: fcsel d10, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: adrp x8, .LCPI15_0 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI15_0] +; CHECK-GI-NEXT: str q1, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d11, v0.d[1] +; CHECK-GI-NEXT: fcsel d0, d9, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: fcsel d1, d10, d11, gt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixtfsi +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w19, w0 +; CHECK-GI-NEXT: mov v1.16b, v0.16b +; CHECK-GI-NEXT: bl __unordtf2 +; CHECK-GI-NEXT: ldp q1, q0, [sp, #32] // 32-byte Folded Reload +; 
CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: csel w20, wzr, w19, ne +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q2, q1, [sp, #32] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: fcsel d9, d1, d2, lt +; CHECK-GI-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: fmov x8, d9 +; CHECK-GI-NEXT: fcsel d8, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d8 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: fcsel d1, d8, d11, gt +; CHECK-GI-NEXT: fcsel d0, d9, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixtfsi +; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w19, w0 +; CHECK-GI-NEXT: mov v1.16b, v0.16b +; CHECK-GI-NEXT: bl __unordtf2 +; CHECK-GI-NEXT: mov v0.s[0], w20 +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload +; CHECK-GI-NEXT: csel w8, wzr, w19, ne +; CHECK-GI-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d11, d10, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[1], w8 +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-NEXT: add sp, sp, #128 +; CHECK-GI-NEXT: ret %x = call <2 x i32> @llvm.fptosi.sat.v2f128.v2i32(<2 x fp128> %f) ret <2 x i32> %x } define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) { -; CHECK-LABEL: test_signed_v3f128_v3i32: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #128 -; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #96] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 128 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w30, -48 -; CHECK-NEXT: stp q0, q2, [sp, #48] // 32-byte Folded Spill -; CHECK-NEXT: mov v2.16b, v1.16b -; CHECK-NEXT: adrp x8, .LCPI16_0 -; CHECK-NEXT: str q1, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI16_0] -; CHECK-NEXT: mov v0.16b, v2.16b -; CHECK-NEXT: str q1, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixtfsi -; CHECK-NEXT: adrp x8, .LCPI16_1 -; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI16_1] -; CHECK-NEXT: mov w20, #-2147483648 // =0x80000000 -; CHECK-NEXT: csel w19, w20, w0, lt -; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: mov w21, #2147483647 // =0x7fffffff -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w19, w21, w19, gt -; CHECK-NEXT: mov v1.16b, v0.16b -; CHECK-NEXT: bl __unordtf2 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w22, wzr, w19, ne -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixtfsi -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte 
Folded Reload -; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: csel w19, w20, w0, lt -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w19, w21, w19, gt -; CHECK-NEXT: mov v1.16b, v0.16b -; CHECK-NEXT: bl __unordtf2 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: csel w8, wzr, w19, ne -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w22 -; CHECK-NEXT: str q0, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixtfsi -; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: csel w19, w20, w0, lt -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w19, w21, w19, gt -; CHECK-NEXT: mov v1.16b, v0.16b -; CHECK-NEXT: bl __unordtf2 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload -; CHECK-NEXT: csel w8, wzr, w19, ne -; CHECK-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload -; CHECK-NEXT: ldp x22, x21, [sp, #96] // 16-byte Folded Reload -; CHECK-NEXT: mov v0.s[2], w8 -; CHECK-NEXT: add sp, sp, #128 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v3f128_v3i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #128 +; CHECK-SD-NEXT: str x30, [sp, #80] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x22, x21, [sp, #96] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 128 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w22, -32 +; CHECK-SD-NEXT: .cfi_offset w30, -48 +; CHECK-SD-NEXT: stp q0, q2, [sp, #48] // 32-byte Folded Spill +; CHECK-SD-NEXT: mov v2.16b, v1.16b +; CHECK-SD-NEXT: adrp x8, .LCPI16_0 +; CHECK-SD-NEXT: str q1, [sp, #32] // 16-byte Folded Spill +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI16_0] +; CHECK-SD-NEXT: mov v0.16b, v2.16b +; CHECK-SD-NEXT: str q1, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixtfsi +; CHECK-SD-NEXT: adrp x8, .LCPI16_1 +; CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI16_1] +; CHECK-SD-NEXT: mov w20, #-2147483648 // =0x80000000 +; CHECK-SD-NEXT: csel w19, w20, w0, lt +; CHECK-SD-NEXT: str q1, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w21, #2147483647 // =0x7fffffff +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w19, w21, w19, gt +; CHECK-SD-NEXT: mov v1.16b, v0.16b +; CHECK-SD-NEXT: bl __unordtf2 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w22, wzr, w19, ne +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixtfsi +; CHECK-SD-NEXT: ldr 
q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: csel w19, w20, w0, lt +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w19, w21, w19, gt +; CHECK-SD-NEXT: mov v1.16b, v0.16b +; CHECK-SD-NEXT: bl __unordtf2 +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel w8, wzr, w19, ne +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w22 +; CHECK-SD-NEXT: str q0, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixtfsi +; CHECK-SD-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: csel w19, w20, w0, lt +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w19, w21, w19, gt +; CHECK-SD-NEXT: mov v1.16b, v0.16b +; CHECK-SD-NEXT: bl __unordtf2 +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload +; CHECK-SD-NEXT: csel w8, wzr, w19, ne +; CHECK-SD-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x22, x21, [sp, #96] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov v0.s[2], w8 +; CHECK-SD-NEXT: add sp, sp, #128 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v3f128_v3i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #144 +; CHECK-GI-NEXT: stp d11, d10, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x30, x21, [sp, #112] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #128] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 144 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w30, -32 +; CHECK-GI-NEXT: .cfi_offset b8, -40 +; CHECK-GI-NEXT: .cfi_offset b9, -48 +; CHECK-GI-NEXT: .cfi_offset b10, -56 +; CHECK-GI-NEXT: .cfi_offset b11, -64 +; CHECK-GI-NEXT: adrp x8, .LCPI16_1 +; CHECK-GI-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI16_1] +; CHECK-GI-NEXT: str q2, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q1, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldr q2, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v2.d[1] +; CHECK-GI-NEXT: mov d8, v1.d[1] +; CHECK-GI-NEXT: fcsel d10, d2, d1, lt +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: fcsel d11, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d11 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: adrp x8, .LCPI16_0 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI16_0] +; CHECK-GI-NEXT: str q1, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d9, v0.d[1] +; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: fcsel d1, d11, d9, gt +; 
CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixtfsi +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w19, w0 +; CHECK-GI-NEXT: mov v1.16b, v0.16b +; CHECK-GI-NEXT: bl __unordtf2 +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: csel w20, wzr, w19, ne +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q2, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: fcsel d10, d1, d2, lt +; CHECK-GI-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: fcsel d11, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d11 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: fcsel d1, d11, d9, gt +; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixtfsi +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w19, w0 +; CHECK-GI-NEXT: mov v1.16b, v0.16b +; CHECK-GI-NEXT: bl __unordtf2 +; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: csel w21, wzr, w19, ne +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q3, q1, [sp, #32] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: ldr q2, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov d0, v3.d[1] +; CHECK-GI-NEXT: fcsel d10, d3, d2, lt +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: fcsel d8, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d8 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: fcsel d1, d8, d9, gt +; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixtfsi +; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w19, w0 +; CHECK-GI-NEXT: mov v1.16b, v0.16b +; CHECK-GI-NEXT: bl __unordtf2 +; CHECK-GI-NEXT: mov v0.s[0], w20 +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: csel w8, wzr, w19, ne +; CHECK-GI-NEXT: ldp x20, x19, [sp, #128] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d11, d10, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[1], w21 +; CHECK-GI-NEXT: ldp x30, x21, [sp, #112] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[2], w8 +; CHECK-GI-NEXT: add sp, sp, #144 +; CHECK-GI-NEXT: ret %x = call <3 x i32> @llvm.fptosi.sat.v3f128.v3i32(<3 x fp128> %f) ret <3 x i32> %x } define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) { -; CHECK-LABEL: test_signed_v4f128_v4i32: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #144 -; CHECK-NEXT: str x30, [sp, #96] // 8-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #112] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #128] // 16-byte Folded Spill -; CHECK-NEXT: 
.cfi_def_cfa_offset 144 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w30, -48 -; CHECK-NEXT: stp q2, q3, [sp, #64] // 32-byte Folded Spill -; CHECK-NEXT: mov v2.16b, v1.16b -; CHECK-NEXT: adrp x8, .LCPI17_0 -; CHECK-NEXT: str q0, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI17_0] -; CHECK-NEXT: mov v0.16b, v2.16b -; CHECK-NEXT: str q1, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixtfsi -; CHECK-NEXT: adrp x8, .LCPI17_1 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI17_1] -; CHECK-NEXT: mov w20, #-2147483648 // =0x80000000 -; CHECK-NEXT: csel w19, w20, w0, lt -; CHECK-NEXT: str q1, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w21, #2147483647 // =0x7fffffff -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w19, w21, w19, gt -; CHECK-NEXT: mov v1.16b, v0.16b -; CHECK-NEXT: bl __unordtf2 -; CHECK-NEXT: ldp q1, q0, [sp, #32] // 32-byte Folded Reload -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w22, wzr, w19, ne -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixtfsi -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: csel w19, w20, w0, lt -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w19, w21, w19, gt -; CHECK-NEXT: mov v1.16b, v0.16b -; CHECK-NEXT: bl __unordtf2 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: ldr q1, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: csel w8, wzr, w19, ne -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w22 -; CHECK-NEXT: str q0, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixtfsi -; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: csel w19, w20, w0, lt -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w19, w21, w19, gt -; CHECK-NEXT: mov v1.16b, v0.16b -; CHECK-NEXT: bl __unordtf2 -; CHECK-NEXT: ldp q1, q0, [sp, #32] // 32-byte Folded Reload -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w8, wzr, w19, ne -; CHECK-NEXT: mov v0.s[2], w8 -; CHECK-NEXT: str q0, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixtfsi -; CHECK-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: csel w19, w20, w0, lt -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csel w19, w21, w19, gt -; CHECK-NEXT: mov v1.16b, v0.16b -; CHECK-NEXT: bl __unordtf2 
-; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload -; CHECK-NEXT: csel w8, wzr, w19, ne -; CHECK-NEXT: ldp x20, x19, [sp, #128] // 16-byte Folded Reload -; CHECK-NEXT: ldp x22, x21, [sp, #112] // 16-byte Folded Reload -; CHECK-NEXT: mov v0.s[3], w8 -; CHECK-NEXT: add sp, sp, #144 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v4f128_v4i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #144 +; CHECK-SD-NEXT: str x30, [sp, #96] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x22, x21, [sp, #112] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #128] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 144 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w22, -32 +; CHECK-SD-NEXT: .cfi_offset w30, -48 +; CHECK-SD-NEXT: stp q2, q3, [sp, #64] // 32-byte Folded Spill +; CHECK-SD-NEXT: mov v2.16b, v1.16b +; CHECK-SD-NEXT: adrp x8, .LCPI17_0 +; CHECK-SD-NEXT: str q0, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: str q1, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI17_0] +; CHECK-SD-NEXT: mov v0.16b, v2.16b +; CHECK-SD-NEXT: str q1, [sp, #32] // 16-byte Folded Spill +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixtfsi +; CHECK-SD-NEXT: adrp x8, .LCPI17_1 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI17_1] +; CHECK-SD-NEXT: mov w20, #-2147483648 // =0x80000000 +; CHECK-SD-NEXT: csel w19, w20, w0, lt +; CHECK-SD-NEXT: str q1, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w21, #2147483647 // =0x7fffffff +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w19, w21, w19, gt +; CHECK-SD-NEXT: mov v1.16b, v0.16b +; CHECK-SD-NEXT: bl __unordtf2 +; CHECK-SD-NEXT: ldp q1, q0, [sp, #32] // 32-byte Folded Reload +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w22, wzr, w19, ne +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixtfsi +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: csel w19, w20, w0, lt +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w19, w21, w19, gt +; CHECK-SD-NEXT: mov v1.16b, v0.16b +; CHECK-SD-NEXT: bl __unordtf2 +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: ldr q1, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel w8, wzr, w19, ne +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w22 +; CHECK-SD-NEXT: str q0, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixtfsi +; CHECK-SD-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: csel w19, w20, w0, lt +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w0, 
#0 +; CHECK-SD-NEXT: csel w19, w21, w19, gt +; CHECK-SD-NEXT: mov v1.16b, v0.16b +; CHECK-SD-NEXT: bl __unordtf2 +; CHECK-SD-NEXT: ldp q1, q0, [sp, #32] // 32-byte Folded Reload +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w8, wzr, w19, ne +; CHECK-SD-NEXT: mov v0.s[2], w8 +; CHECK-SD-NEXT: str q0, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixtfsi +; CHECK-SD-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: csel w19, w20, w0, lt +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csel w19, w21, w19, gt +; CHECK-SD-NEXT: mov v1.16b, v0.16b +; CHECK-SD-NEXT: bl __unordtf2 +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload +; CHECK-SD-NEXT: csel w8, wzr, w19, ne +; CHECK-SD-NEXT: ldp x20, x19, [sp, #128] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x22, x21, [sp, #112] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov v0.s[3], w8 +; CHECK-SD-NEXT: add sp, sp, #144 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v4f128_v4i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #176 +; CHECK-GI-NEXT: stp d11, d10, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #112] // 16-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #128] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #144] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #160] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 176 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w30, -48 +; CHECK-GI-NEXT: .cfi_offset b8, -56 +; CHECK-GI-NEXT: .cfi_offset b9, -64 +; CHECK-GI-NEXT: .cfi_offset b10, -72 +; CHECK-GI-NEXT: .cfi_offset b11, -80 +; CHECK-GI-NEXT: adrp x8, .LCPI17_1 +; CHECK-GI-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI17_1] +; CHECK-GI-NEXT: str q2, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q3, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q1, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldr q2, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v2.d[1] +; CHECK-GI-NEXT: mov d8, v1.d[1] +; CHECK-GI-NEXT: fcsel d10, d2, d1, lt +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: fcsel d11, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d11 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: adrp x8, .LCPI17_0 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI17_0] +; CHECK-GI-NEXT: str q1, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d9, v0.d[1] +; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: fcsel d1, d11, d9, gt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixtfsi +; CHECK-GI-NEXT: ldr q0, 
[sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w19, w0 +; CHECK-GI-NEXT: mov v1.16b, v0.16b +; CHECK-GI-NEXT: bl __unordtf2 +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: csel w20, wzr, w19, ne +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q1, q4, [sp, #64] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov d0, v2.d[1] +; CHECK-GI-NEXT: fcsel d10, d2, d4, lt +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: fcsel d11, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d11 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: fcsel d1, d11, d9, gt +; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixtfsi +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w19, w0 +; CHECK-GI-NEXT: mov v1.16b, v0.16b +; CHECK-GI-NEXT: bl __unordtf2 +; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: csel w21, wzr, w19, ne +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldr q1, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q2, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: fcsel d10, d1, d2, lt +; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: fcsel d11, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d11 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: fcsel d1, d11, d9, gt +; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixtfsi +; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w19, w0 +; CHECK-GI-NEXT: mov v1.16b, v0.16b +; CHECK-GI-NEXT: bl __unordtf2 +; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: csel w22, wzr, w19, ne +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q5, q1, [sp, #48] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: ldr q2, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov d0, v5.d[1] +; CHECK-GI-NEXT: fcsel d10, d5, d2, lt +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: fcsel d8, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d8 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: fcsel d1, d8, d9, gt +; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixtfsi +; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w19, w0 +; CHECK-GI-NEXT: mov 
v1.16b, v0.16b +; CHECK-GI-NEXT: bl __unordtf2 +; CHECK-GI-NEXT: mov v0.s[0], w20 +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: ldr x30, [sp, #128] // 8-byte Folded Reload +; CHECK-GI-NEXT: csel w8, wzr, w19, ne +; CHECK-GI-NEXT: ldp x20, x19, [sp, #160] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #112] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d11, d10, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[1], w21 +; CHECK-GI-NEXT: mov v0.s[2], w22 +; CHECK-GI-NEXT: ldp x22, x21, [sp, #144] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[3], w8 +; CHECK-GI-NEXT: add sp, sp, #176 +; CHECK-GI-NEXT: ret %x = call <4 x i32> @llvm.fptosi.sat.v4f128.v4i32(<4 x fp128> %f) ret <4 x i32> %x } @@ -565,29 +1229,53 @@ declare <7 x i32> @llvm.fptosi.sat.v7f16.v7i32 (<7 x half>) declare <8 x i32> @llvm.fptosi.sat.v8f16.v8i32 (<8 x half>) define <1 x i32> @test_signed_v1f16_v1i32(<1 x half> %f) { -; CHECK-CVT-LABEL: test_signed_v1f16_v1i32: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvt s0, h0 -; CHECK-CVT-NEXT: fcvtzs w8, s0 -; CHECK-CVT-NEXT: fmov s0, w8 -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_v1f16_v1i32: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs w8, h0 -; CHECK-FP16-NEXT: fmov s0, w8 -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_v1f16_v1i32: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvt s0, h0 +; CHECK-SD-CVT-NEXT: fcvtzs w8, s0 +; CHECK-SD-CVT-NEXT: fmov s0, w8 +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_v1f16_v1i32: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs w8, h0 +; CHECK-SD-FP16-NEXT: fmov s0, w8 +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v1f16_v1i32: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: fcvtzs w8, s0 +; CHECK-GI-CVT-NEXT: mov v0.s[0], w8 +; CHECK-GI-CVT-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v1f16_v1i32: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs w8, h0 +; CHECK-GI-FP16-NEXT: mov v0.s[0], w8 +; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-FP16-NEXT: ret %x = call <1 x i32> @llvm.fptosi.sat.v1f16.v1i32(<1 x half> %f) ret <1 x i32> %x } define <2 x i32> @test_signed_v2f16_v2i32(<2 x half> %f) { -; CHECK-LABEL: test_signed_v2f16_v2i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl v0.4s, v0.4h -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f16_v2i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f16_v2i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: mov h1, v0.h[1] +; CHECK-GI-NEXT: mov v0.h[1], v1.h[0] +; CHECK-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-GI-NEXT: fcvtzs v0.2s, v0.2s +; CHECK-GI-NEXT: ret %x = call <2 x i32> @llvm.fptosi.sat.v2f16.v2i32(<2 x half> %f) ret <2 x i32> %x } @@ -613,67 +1301,135 @@ define <4 x i32> @test_signed_v4f16_v4i32(<4 x half> %f) { } define <5 x i32> @test_signed_v5f16_v5i32(<5 x half> %f) { -; CHECK-LABEL: test_signed_v5f16_v5i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl v1.4s, v0.4h -; CHECK-NEXT: fcvtl2 v0.4s, v0.8h -; CHECK-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: mov w1, v1.s[1] -; CHECK-NEXT: mov w2, v1.s[2] -; CHECK-NEXT: mov w3, 
v1.s[3] -; CHECK-NEXT: fmov w0, s1 -; CHECK-NEXT: fmov w4, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v5f16_v5i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtl v1.4s, v0.4h +; CHECK-SD-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-SD-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: mov w1, v1.s[1] +; CHECK-SD-NEXT: mov w2, v1.s[2] +; CHECK-SD-NEXT: mov w3, v1.s[3] +; CHECK-SD-NEXT: fmov w0, s1 +; CHECK-SD-NEXT: fmov w4, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v5f16_v5i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-GI-NEXT: mov v0.h[0], v0.h[4] +; CHECK-GI-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-GI-NEXT: mov s2, v1.s[1] +; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-NEXT: mov s3, v1.s[2] +; CHECK-GI-NEXT: mov s4, v1.s[3] +; CHECK-GI-NEXT: fmov w0, s1 +; CHECK-GI-NEXT: fmov w1, s2 +; CHECK-GI-NEXT: fmov w2, s3 +; CHECK-GI-NEXT: fmov w4, s0 +; CHECK-GI-NEXT: fmov w3, s4 +; CHECK-GI-NEXT: ret %x = call <5 x i32> @llvm.fptosi.sat.v5f16.v5i32(<5 x half> %f) ret <5 x i32> %x } define <6 x i32> @test_signed_v6f16_v6i32(<6 x half> %f) { -; CHECK-LABEL: test_signed_v6f16_v6i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl v1.4s, v0.4h -; CHECK-NEXT: fcvtl2 v0.4s, v0.8h -; CHECK-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: mov w1, v1.s[1] -; CHECK-NEXT: mov w2, v1.s[2] -; CHECK-NEXT: mov w5, v0.s[1] -; CHECK-NEXT: mov w3, v1.s[3] -; CHECK-NEXT: fmov w4, s0 -; CHECK-NEXT: fmov w0, s1 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v6f16_v6i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtl v1.4s, v0.4h +; CHECK-SD-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-SD-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: mov w1, v1.s[1] +; CHECK-SD-NEXT: mov w2, v1.s[2] +; CHECK-SD-NEXT: mov w5, v0.s[1] +; CHECK-SD-NEXT: mov w3, v1.s[3] +; CHECK-SD-NEXT: fmov w4, s0 +; CHECK-SD-NEXT: fmov w0, s1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v6f16_v6i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov v1.h[0], v0.h[4] +; CHECK-GI-NEXT: mov v1.h[1], v0.h[5] +; CHECK-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-GI-NEXT: mov s2, v0.s[1] +; CHECK-GI-NEXT: mov s3, v0.s[2] +; CHECK-GI-NEXT: mov s4, v0.s[3] +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: mov s5, v1.s[1] +; CHECK-GI-NEXT: fmov w1, s2 +; CHECK-GI-NEXT: fmov w2, s3 +; CHECK-GI-NEXT: fmov w3, s4 +; CHECK-GI-NEXT: fmov w4, s1 +; CHECK-GI-NEXT: fmov w5, s5 +; CHECK-GI-NEXT: ret %x = call <6 x i32> @llvm.fptosi.sat.v6f16.v6i32(<6 x half> %f) ret <6 x i32> %x } define <7 x i32> @test_signed_v7f16_v7i32(<7 x half> %f) { -; CHECK-LABEL: test_signed_v7f16_v7i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl v1.4s, v0.4h -; CHECK-NEXT: fcvtl2 v0.4s, v0.8h -; CHECK-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: mov w1, v1.s[1] -; CHECK-NEXT: mov w2, v1.s[2] -; CHECK-NEXT: mov w3, v1.s[3] -; CHECK-NEXT: mov w5, v0.s[1] -; CHECK-NEXT: mov w6, v0.s[2] -; CHECK-NEXT: fmov w0, s1 -; CHECK-NEXT: fmov w4, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v7f16_v7i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtl v1.4s, v0.4h +; CHECK-SD-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-SD-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: mov w1, v1.s[1] +; CHECK-SD-NEXT: mov w2, v1.s[2] +; CHECK-SD-NEXT: mov w3, v1.s[3] +; CHECK-SD-NEXT: mov w5, v0.s[1] +; 
CHECK-SD-NEXT: mov w6, v0.s[2] +; CHECK-SD-NEXT: fmov w0, s1 +; CHECK-SD-NEXT: fmov w4, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v7f16_v7i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov v1.h[0], v0.h[4] +; CHECK-GI-NEXT: mov v1.h[1], v0.h[5] +; CHECK-GI-NEXT: mov v1.h[2], v0.h[6] +; CHECK-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-GI-NEXT: mov s2, v0.s[1] +; CHECK-GI-NEXT: mov s3, v0.s[2] +; CHECK-GI-NEXT: mov s4, v0.s[3] +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: mov s5, v1.s[1] +; CHECK-GI-NEXT: mov s6, v1.s[2] +; CHECK-GI-NEXT: fmov w1, s2 +; CHECK-GI-NEXT: fmov w2, s3 +; CHECK-GI-NEXT: fmov w3, s4 +; CHECK-GI-NEXT: fmov w4, s1 +; CHECK-GI-NEXT: fmov w5, s5 +; CHECK-GI-NEXT: fmov w6, s6 +; CHECK-GI-NEXT: ret %x = call <7 x i32> @llvm.fptosi.sat.v7f16.v7i32(<7 x half> %f) ret <7 x i32> %x } define <8 x i32> @test_signed_v8f16_v8i32(<8 x half> %f) { -; CHECK-LABEL: test_signed_v8f16_v8i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-NEXT: fcvtl v0.4s, v0.4h -; CHECK-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v8f16_v8i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-SD-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v8f16_v8i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-GI-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-GI-NEXT: fcvtzs v0.4s, v1.4s +; CHECK-GI-NEXT: fcvtzs v1.4s, v2.4s +; CHECK-GI-NEXT: ret %x = call <8 x i32> @llvm.fptosi.sat.v8f16.v8i32(<8 x half> %f) ret <8 x i32> %x } @@ -693,66 +1449,111 @@ declare <2 x i100> @llvm.fptosi.sat.v2f32.v2i100(<2 x float>) declare <2 x i128> @llvm.fptosi.sat.v2f32.v2i128(<2 x float>) define <2 x i1> @test_signed_v2f32_v2i1(<2 x float> %f) { -; CHECK-LABEL: test_signed_v2f32_v2i1: -; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.2d, #0000000000000000 -; CHECK-NEXT: fcvtzs v0.2s, v0.2s -; CHECK-NEXT: movi v2.2d, #0xffffffffffffffff -; CHECK-NEXT: smin v0.2s, v0.2s, v1.2s -; CHECK-NEXT: smax v0.2s, v0.2s, v2.2s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f32_v2i1: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: movi v1.2d, #0000000000000000 +; CHECK-SD-NEXT: fcvtzs v0.2s, v0.2s +; CHECK-SD-NEXT: movi v2.2d, #0xffffffffffffffff +; CHECK-SD-NEXT: smin v0.2s, v0.2s, v1.2s +; CHECK-SD-NEXT: smax v0.2s, v0.2s, v2.2s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f32_v2i1: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v1.2d, #0000000000000000 +; CHECK-GI-NEXT: fcvtzs v0.2s, v0.2s +; CHECK-GI-NEXT: movi d2, #0xffffffffffffffff +; CHECK-GI-NEXT: smin v0.2s, v0.2s, v1.2s +; CHECK-GI-NEXT: smax v0.2s, v0.2s, v2.2s +; CHECK-GI-NEXT: ret %x = call <2 x i1> @llvm.fptosi.sat.v2f32.v2i1(<2 x float> %f) ret <2 x i1> %x } define <2 x i8> @test_signed_v2f32_v2i8(<2 x float> %f) { -; CHECK-LABEL: test_signed_v2f32_v2i8: -; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.2s, #127 -; CHECK-NEXT: fcvtzs v0.2s, v0.2s -; CHECK-NEXT: smin v0.2s, v0.2s, v1.2s -; CHECK-NEXT: mvni v1.2s, #127 -; CHECK-NEXT: smax v0.2s, v0.2s, v1.2s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f32_v2i8: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: movi v1.2s, #127 +; CHECK-SD-NEXT: fcvtzs v0.2s, v0.2s +; CHECK-SD-NEXT: smin v0.2s, v0.2s, v1.2s +; CHECK-SD-NEXT: mvni v1.2s, #127 +; CHECK-SD-NEXT: smax v0.2s, v0.2s, v1.2s +; 
CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f32_v2i8: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v1.2s, #127 +; CHECK-GI-NEXT: fcvtzs v0.2s, v0.2s +; CHECK-GI-NEXT: mvni v2.2s, #127 +; CHECK-GI-NEXT: smin v0.2s, v0.2s, v1.2s +; CHECK-GI-NEXT: smax v0.2s, v0.2s, v2.2s +; CHECK-GI-NEXT: ret %x = call <2 x i8> @llvm.fptosi.sat.v2f32.v2i8(<2 x float> %f) ret <2 x i8> %x } define <2 x i13> @test_signed_v2f32_v2i13(<2 x float> %f) { -; CHECK-LABEL: test_signed_v2f32_v2i13: -; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.2s, #15, msl #8 -; CHECK-NEXT: fcvtzs v0.2s, v0.2s -; CHECK-NEXT: smin v0.2s, v0.2s, v1.2s -; CHECK-NEXT: mvni v1.2s, #15, msl #8 -; CHECK-NEXT: smax v0.2s, v0.2s, v1.2s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f32_v2i13: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: movi v1.2s, #15, msl #8 +; CHECK-SD-NEXT: fcvtzs v0.2s, v0.2s +; CHECK-SD-NEXT: smin v0.2s, v0.2s, v1.2s +; CHECK-SD-NEXT: mvni v1.2s, #15, msl #8 +; CHECK-SD-NEXT: smax v0.2s, v0.2s, v1.2s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f32_v2i13: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v1.2s, #15, msl #8 +; CHECK-GI-NEXT: fcvtzs v0.2s, v0.2s +; CHECK-GI-NEXT: mvni v2.2s, #15, msl #8 +; CHECK-GI-NEXT: smin v0.2s, v0.2s, v1.2s +; CHECK-GI-NEXT: smax v0.2s, v0.2s, v2.2s +; CHECK-GI-NEXT: ret %x = call <2 x i13> @llvm.fptosi.sat.v2f32.v2i13(<2 x float> %f) ret <2 x i13> %x } define <2 x i16> @test_signed_v2f32_v2i16(<2 x float> %f) { -; CHECK-LABEL: test_signed_v2f32_v2i16: -; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.2s, #127, msl #8 -; CHECK-NEXT: fcvtzs v0.2s, v0.2s -; CHECK-NEXT: smin v0.2s, v0.2s, v1.2s -; CHECK-NEXT: mvni v1.2s, #127, msl #8 -; CHECK-NEXT: smax v0.2s, v0.2s, v1.2s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f32_v2i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: movi v1.2s, #127, msl #8 +; CHECK-SD-NEXT: fcvtzs v0.2s, v0.2s +; CHECK-SD-NEXT: smin v0.2s, v0.2s, v1.2s +; CHECK-SD-NEXT: mvni v1.2s, #127, msl #8 +; CHECK-SD-NEXT: smax v0.2s, v0.2s, v1.2s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f32_v2i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v1.2s, #127, msl #8 +; CHECK-GI-NEXT: fcvtzs v0.2s, v0.2s +; CHECK-GI-NEXT: mvni v2.2s, #127, msl #8 +; CHECK-GI-NEXT: smin v0.2s, v0.2s, v1.2s +; CHECK-GI-NEXT: smax v0.2s, v0.2s, v2.2s +; CHECK-GI-NEXT: ret %x = call <2 x i16> @llvm.fptosi.sat.v2f32.v2i16(<2 x float> %f) ret <2 x i16> %x } define <2 x i19> @test_signed_v2f32_v2i19(<2 x float> %f) { -; CHECK-LABEL: test_signed_v2f32_v2i19: -; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.2s, #3, msl #16 -; CHECK-NEXT: fcvtzs v0.2s, v0.2s -; CHECK-NEXT: smin v0.2s, v0.2s, v1.2s -; CHECK-NEXT: mvni v1.2s, #3, msl #16 -; CHECK-NEXT: smax v0.2s, v0.2s, v1.2s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f32_v2i19: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: movi v1.2s, #3, msl #16 +; CHECK-SD-NEXT: fcvtzs v0.2s, v0.2s +; CHECK-SD-NEXT: smin v0.2s, v0.2s, v1.2s +; CHECK-SD-NEXT: mvni v1.2s, #3, msl #16 +; CHECK-SD-NEXT: smax v0.2s, v0.2s, v1.2s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f32_v2i19: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v1.2s, #3, msl #16 +; CHECK-GI-NEXT: fcvtzs v0.2s, v0.2s +; CHECK-GI-NEXT: mvni v2.2s, #3, msl #16 +; CHECK-GI-NEXT: smin v0.2s, v0.2s, v1.2s +; CHECK-GI-NEXT: smax v0.2s, v0.2s, v2.2s +; CHECK-GI-NEXT: ret %x = call <2 x i19> @llvm.fptosi.sat.v2f32.v2i19(<2 x float> %f) ret <2 x i19> %x } @@ -767,25 +1568,39 @@ define <2 x i32> @test_signed_v2f32_v2i32_duplicate(<2 x float> %f) { } define <2 x i50> 
@test_signed_v2f32_v2i50(<2 x float> %f) { -; CHECK-LABEL: test_signed_v2f32_v2i50: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: mov s1, v0.s[1] -; CHECK-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff -; CHECK-NEXT: fcvtzs x10, s0 -; CHECK-NEXT: mov x11, #-562949953421312 // =0xfffe000000000000 -; CHECK-NEXT: fcvtzs x9, s1 -; CHECK-NEXT: cmp x9, x8 -; CHECK-NEXT: csel x9, x9, x8, lt -; CHECK-NEXT: cmp x9, x11 -; CHECK-NEXT: csel x9, x9, x11, gt -; CHECK-NEXT: cmp x10, x8 -; CHECK-NEXT: csel x8, x10, x8, lt -; CHECK-NEXT: cmp x8, x11 -; CHECK-NEXT: csel x8, x8, x11, gt -; CHECK-NEXT: fmov d0, x8 -; CHECK-NEXT: mov v0.d[1], x9 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f32_v2i50: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-NEXT: mov s1, v0.s[1] +; CHECK-SD-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff +; CHECK-SD-NEXT: fcvtzs x10, s0 +; CHECK-SD-NEXT: mov x11, #-562949953421312 // =0xfffe000000000000 +; CHECK-SD-NEXT: fcvtzs x9, s1 +; CHECK-SD-NEXT: cmp x9, x8 +; CHECK-SD-NEXT: csel x9, x9, x8, lt +; CHECK-SD-NEXT: cmp x9, x11 +; CHECK-SD-NEXT: csel x9, x9, x11, gt +; CHECK-SD-NEXT: cmp x10, x8 +; CHECK-SD-NEXT: csel x8, x10, x8, lt +; CHECK-SD-NEXT: cmp x8, x11 +; CHECK-SD-NEXT: csel x8, x8, x11, gt +; CHECK-SD-NEXT: fmov d0, x8 +; CHECK-SD-NEXT: mov v0.d[1], x9 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f32_v2i50: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtl v0.2d, v0.2s +; CHECK-GI-NEXT: adrp x8, .LCPI32_1 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI32_1] +; CHECK-GI-NEXT: adrp x8, .LCPI32_0 +; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-NEXT: cmgt v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI32_0] +; CHECK-GI-NEXT: cmgt v2.2d, v0.2d, v1.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: ret %x = call <2 x i50> @llvm.fptosi.sat.v2f32.v2i50(<2 x float> %f) ret <2 x i50> %x } @@ -801,125 +1616,241 @@ define <2 x i64> @test_signed_v2f32_v2i64(<2 x float> %f) { } define <2 x i100> @test_signed_v2f32_v2i100(<2 x float> %f) { -; CHECK-LABEL: test_signed_v2f32_v2i100: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #80 -; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill -; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 80 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w30, -40 -; CHECK-NEXT: .cfi_offset b8, -48 -; CHECK-NEXT: .cfi_offset b9, -56 -; CHECK-NEXT: .cfi_offset b10, -64 -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: movi v9.2s, #241, lsl #24 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w8, #1895825407 // =0x70ffffff -; CHECK-NEXT: fmov s10, w8 -; CHECK-NEXT: mov x21, #-34359738368 // =0xfffffff800000000 -; CHECK-NEXT: mov x22, #34359738367 // =0x7ffffffff -; CHECK-NEXT: mov s8, v0.s[1] -; CHECK-NEXT: fcmp s0, s9 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x21, x1, lt -; CHECK-NEXT: fcmp s0, s10 -; CHECK-NEXT: csel x9, x22, x9, gt -; CHECK-NEXT: 
csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s0, s0 -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: csel x19, xzr, x8, vs -; CHECK-NEXT: csel x20, xzr, x9, vs -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x21, x1, lt -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: mov x0, x19 -; CHECK-NEXT: mov x1, x20 -; CHECK-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload -; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: csel x9, x22, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload -; CHECK-NEXT: csel x2, xzr, x8, vs -; CHECK-NEXT: csel x3, xzr, x9, vs -; CHECK-NEXT: add sp, sp, #80 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f32_v2i100: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #80 +; CHECK-SD-NEXT: str d10, [sp, #16] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill +; CHECK-SD-NEXT: str x30, [sp, #40] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 80 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w22, -32 +; CHECK-SD-NEXT: .cfi_offset w30, -40 +; CHECK-SD-NEXT: .cfi_offset b8, -48 +; CHECK-SD-NEXT: .cfi_offset b9, -56 +; CHECK-SD-NEXT: .cfi_offset b10, -64 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: movi v9.2s, #241, lsl #24 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w8, #1895825407 // =0x70ffffff +; CHECK-SD-NEXT: fmov s10, w8 +; CHECK-SD-NEXT: mov x21, #-34359738368 // =0xfffffff800000000 +; CHECK-SD-NEXT: mov x22, #34359738367 // =0x7ffffffff +; CHECK-SD-NEXT: mov s8, v0.s[1] +; CHECK-SD-NEXT: fcmp s0, s9 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x21, x1, lt +; CHECK-SD-NEXT: fcmp s0, s10 +; CHECK-SD-NEXT: csel x9, x22, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s0, s0 +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: csel x19, xzr, x8, vs +; CHECK-SD-NEXT: csel x20, xzr, x9, vs +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x21, x1, lt +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: mov x0, x19 +; CHECK-SD-NEXT: mov x1, x20 +; CHECK-SD-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload +; CHECK-SD-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x9, x22, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x2, xzr, x8, vs +; CHECK-SD-NEXT: csel x3, xzr, x9, vs +; CHECK-SD-NEXT: add sp, sp, #80 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f32_v2i100: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #80 +; CHECK-GI-NEXT: str d10, [sp, #16] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #24] // 
16-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #40] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 80 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w30, -40 +; CHECK-GI-NEXT: .cfi_offset b8, -48 +; CHECK-GI-NEXT: .cfi_offset b9, -56 +; CHECK-GI-NEXT: .cfi_offset b10, -64 +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: mov s8, v0.s[1] +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-GI-NEXT: bl __fixsfti +; CHECK-GI-NEXT: movi v9.2s, #241, lsl #24 +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w8, #1895825407 // =0x70ffffff +; CHECK-GI-NEXT: fmov s10, w8 +; CHECK-GI-NEXT: mov x21, #34359738368 // =0x800000000 +; CHECK-GI-NEXT: mov x22, #34359738367 // =0x7ffffffff +; CHECK-GI-NEXT: fcmp s0, s9 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x21, x1, lt +; CHECK-GI-NEXT: fcmp s0, s10 +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x22, x9, gt +; CHECK-GI-NEXT: fcmp s0, s0 +; CHECK-GI-NEXT: fmov s0, s8 +; CHECK-GI-NEXT: csel x19, xzr, x8, vs +; CHECK-GI-NEXT: csel x20, xzr, x9, vs +; CHECK-GI-NEXT: bl __fixsfti +; CHECK-GI-NEXT: fcmp s8, s9 +; CHECK-GI-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x21, x1, lt +; CHECK-GI-NEXT: fcmp s8, s10 +; CHECK-GI-NEXT: mov x0, x19 +; CHECK-GI-NEXT: mov x1, x20 +; CHECK-GI-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload +; CHECK-GI-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x22, x9, gt +; CHECK-GI-NEXT: fcmp s8, s8 +; CHECK-GI-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x2, xzr, x8, vs +; CHECK-GI-NEXT: csel x3, xzr, x9, vs +; CHECK-GI-NEXT: add sp, sp, #80 +; CHECK-GI-NEXT: ret %x = call <2 x i100> @llvm.fptosi.sat.v2f32.v2i100(<2 x float> %f) ret <2 x i100> %x } define <2 x i128> @test_signed_v2f32_v2i128(<2 x float> %f) { -; CHECK-LABEL: test_signed_v2f32_v2i128: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #80 -; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill -; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 80 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w30, -40 -; CHECK-NEXT: .cfi_offset b8, -48 -; CHECK-NEXT: .cfi_offset b9, -56 -; CHECK-NEXT: .cfi_offset b10, -64 -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: movi v9.2s, #255, lsl #24 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w8, #2130706431 // =0x7effffff -; CHECK-NEXT: fmov s10, w8 -; CHECK-NEXT: mov x21, #-9223372036854775808 // =0x8000000000000000 -; CHECK-NEXT: mov 
x22, #9223372036854775807 // =0x7fffffffffffffff -; CHECK-NEXT: mov s8, v0.s[1] -; CHECK-NEXT: fcmp s0, s9 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x21, x1, lt -; CHECK-NEXT: fcmp s0, s10 -; CHECK-NEXT: csel x9, x22, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s0, s0 -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: csel x19, xzr, x8, vs -; CHECK-NEXT: csel x20, xzr, x9, vs -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x21, x1, lt -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: mov x0, x19 -; CHECK-NEXT: mov x1, x20 -; CHECK-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload -; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: csel x9, x22, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload -; CHECK-NEXT: csel x2, xzr, x8, vs -; CHECK-NEXT: csel x3, xzr, x9, vs -; CHECK-NEXT: add sp, sp, #80 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f32_v2i128: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #80 +; CHECK-SD-NEXT: str d10, [sp, #16] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill +; CHECK-SD-NEXT: str x30, [sp, #40] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 80 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w22, -32 +; CHECK-SD-NEXT: .cfi_offset w30, -40 +; CHECK-SD-NEXT: .cfi_offset b8, -48 +; CHECK-SD-NEXT: .cfi_offset b9, -56 +; CHECK-SD-NEXT: .cfi_offset b10, -64 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: movi v9.2s, #255, lsl #24 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w8, #2130706431 // =0x7effffff +; CHECK-SD-NEXT: fmov s10, w8 +; CHECK-SD-NEXT: mov x21, #-9223372036854775808 // =0x8000000000000000 +; CHECK-SD-NEXT: mov x22, #9223372036854775807 // =0x7fffffffffffffff +; CHECK-SD-NEXT: mov s8, v0.s[1] +; CHECK-SD-NEXT: fcmp s0, s9 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x21, x1, lt +; CHECK-SD-NEXT: fcmp s0, s10 +; CHECK-SD-NEXT: csel x9, x22, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s0, s0 +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: csel x19, xzr, x8, vs +; CHECK-SD-NEXT: csel x20, xzr, x9, vs +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x21, x1, lt +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: mov x0, x19 +; CHECK-SD-NEXT: mov x1, x20 +; CHECK-SD-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload +; CHECK-SD-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x9, x22, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x2, xzr, x8, vs +; CHECK-SD-NEXT: csel x3, 
xzr, x9, vs +; CHECK-SD-NEXT: add sp, sp, #80 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f32_v2i128: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #80 +; CHECK-GI-NEXT: str d10, [sp, #16] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #40] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 80 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w30, -40 +; CHECK-GI-NEXT: .cfi_offset b8, -48 +; CHECK-GI-NEXT: .cfi_offset b9, -56 +; CHECK-GI-NEXT: .cfi_offset b10, -64 +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: mov s8, v0.s[1] +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-GI-NEXT: bl __fixsfti +; CHECK-GI-NEXT: movi v9.2s, #255, lsl #24 +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w8, #2130706431 // =0x7effffff +; CHECK-GI-NEXT: fmov s10, w8 +; CHECK-GI-NEXT: mov x21, #-9223372036854775808 // =0x8000000000000000 +; CHECK-GI-NEXT: mov x22, #9223372036854775807 // =0x7fffffffffffffff +; CHECK-GI-NEXT: fcmp s0, s9 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x21, x1, lt +; CHECK-GI-NEXT: fcmp s0, s10 +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x22, x9, gt +; CHECK-GI-NEXT: fcmp s0, s0 +; CHECK-GI-NEXT: fmov s0, s8 +; CHECK-GI-NEXT: csel x19, xzr, x8, vs +; CHECK-GI-NEXT: csel x20, xzr, x9, vs +; CHECK-GI-NEXT: bl __fixsfti +; CHECK-GI-NEXT: fcmp s8, s9 +; CHECK-GI-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x21, x1, lt +; CHECK-GI-NEXT: fcmp s8, s10 +; CHECK-GI-NEXT: mov x0, x19 +; CHECK-GI-NEXT: mov x1, x20 +; CHECK-GI-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload +; CHECK-GI-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x22, x9, gt +; CHECK-GI-NEXT: fcmp s8, s8 +; CHECK-GI-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x2, xzr, x8, vs +; CHECK-GI-NEXT: csel x3, xzr, x9, vs +; CHECK-GI-NEXT: add sp, sp, #80 +; CHECK-GI-NEXT: ret %x = call <2 x i128> @llvm.fptosi.sat.v2f32.v2i128(<2 x float> %f) ret <2 x i128> %x } @@ -939,15 +1870,25 @@ declare <4 x i100> @llvm.fptosi.sat.v4f32.v4i100(<4 x float>) declare <4 x i128> @llvm.fptosi.sat.v4f32.v4i128(<4 x float>) define <4 x i1> @test_signed_v4f32_v4i1(<4 x float> %f) { -; CHECK-LABEL: test_signed_v4f32_v4i1: -; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.2d, #0000000000000000 -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: smin v0.4s, v0.4s, v1.4s -; CHECK-NEXT: movi v1.2d, #0xffffffffffffffff -; CHECK-NEXT: smax v0.4s, v0.4s, v1.4s -; CHECK-NEXT: xtn v0.4h, v0.4s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v4f32_v4i1: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: movi v1.2d, #0000000000000000 +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: movi v1.2d, #0xffffffffffffffff +; CHECK-SD-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v4f32_v4i1: +; 
CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v1.2d, #0000000000000000 +; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-NEXT: movi v2.2d, #0xffffffffffffffff +; CHECK-GI-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: smax v0.4s, v0.4s, v2.4s +; CHECK-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-GI-NEXT: ret %x = call <4 x i1> @llvm.fptosi.sat.v4f32.v4i1(<4 x float> %f) ret <4 x i1> %x } @@ -981,11 +1922,21 @@ define <4 x i13> @test_signed_v4f32_v4i13(<4 x float> %f) { } define <4 x i16> @test_signed_v4f32_v4i16(<4 x float> %f) { -; CHECK-LABEL: test_signed_v4f32_v4i16: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: sqxtn v0.4h, v0.4s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v4f32_v4i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: sqxtn v0.4h, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v4f32_v4i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v1.4s, #127, msl #8 +; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: mvni v1.4s, #127, msl #8 +; CHECK-GI-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-GI-NEXT: ret %x = call <4 x i16> @llvm.fptosi.sat.v4f32.v4i16(<4 x float> %f) ret <4 x i16> %x } @@ -1046,213 +1997,415 @@ define <4 x i50> @test_signed_v4f32_v4i50(<4 x float> %f) { } define <4 x i64> @test_signed_v4f32_v4i64(<4 x float> %f) { -; CHECK-LABEL: test_signed_v4f32_v4i64: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl2 v1.2d, v0.4s -; CHECK-NEXT: fcvtl v0.2d, v0.2s -; CHECK-NEXT: fcvtzs v1.2d, v1.2d -; CHECK-NEXT: fcvtzs v0.2d, v0.2d -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v4f32_v4i64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtl2 v1.2d, v0.4s +; CHECK-SD-NEXT: fcvtl v0.2d, v0.2s +; CHECK-SD-NEXT: fcvtzs v1.2d, v1.2d +; CHECK-SD-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v4f32_v4i64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtl v1.2d, v0.2s +; CHECK-GI-NEXT: fcvtl2 v2.2d, v0.4s +; CHECK-GI-NEXT: fcvtzs v0.2d, v1.2d +; CHECK-GI-NEXT: fcvtzs v1.2d, v2.2d +; CHECK-GI-NEXT: ret %x = call <4 x i64> @llvm.fptosi.sat.v4f32.v4i64(<4 x float> %f) ret <4 x i64> %x } define <4 x i100> @test_signed_v4f32_v4i100(<4 x float> %f) { -; CHECK-LABEL: test_signed_v4f32_v4i100: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #112 -; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill -; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill -; CHECK-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 112 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w23, -40 -; CHECK-NEXT: .cfi_offset w24, -48 -; CHECK-NEXT: .cfi_offset w25, -56 -; CHECK-NEXT: .cfi_offset w26, -64 -; CHECK-NEXT: .cfi_offset w30, -72 -; CHECK-NEXT: .cfi_offset b8, -80 -; CHECK-NEXT: .cfi_offset b9, -88 -; CHECK-NEXT: .cfi_offset b10, -96 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: movi v9.2s, #241, lsl #24 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w8, #1895825407 // =0x70ffffff -; CHECK-NEXT: fmov s10, w8 -; 
CHECK-NEXT: mov x25, #-34359738368 // =0xfffffff800000000 -; CHECK-NEXT: mov x26, #34359738367 // =0x7ffffffff -; CHECK-NEXT: mov s8, v0.s[1] -; CHECK-NEXT: fcmp s0, s9 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s0, s10 -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s0, s0 -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: csel x19, xzr, x8, vs -; CHECK-NEXT: csel x20, xzr, x9, vs -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: csel x21, xzr, x8, vs -; CHECK-NEXT: csel x22, xzr, x9, vs -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: fcmp s0, s9 -; CHECK-NEXT: mov s8, v0.s[1] -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s0, s10 -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s0, s0 -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: csel x23, xzr, x8, vs -; CHECK-NEXT: csel x24, xzr, x9, vs -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: mov x2, x21 -; CHECK-NEXT: mov x3, x22 -; CHECK-NEXT: mov x4, x23 -; CHECK-NEXT: mov x5, x24 -; CHECK-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload -; CHECK-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: mov x0, x19 -; CHECK-NEXT: mov x1, x20 -; CHECK-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload -; CHECK-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload -; CHECK-NEXT: csel x6, xzr, x8, vs -; CHECK-NEXT: csel x7, xzr, x9, vs -; CHECK-NEXT: add sp, sp, #112 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v4f32_v4i100: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #112 +; CHECK-SD-NEXT: str d10, [sp, #16] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill +; CHECK-SD-NEXT: str x30, [sp, #40] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 112 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w22, -32 +; CHECK-SD-NEXT: .cfi_offset w23, -40 +; CHECK-SD-NEXT: .cfi_offset w24, -48 +; CHECK-SD-NEXT: .cfi_offset w25, -56 +; CHECK-SD-NEXT: .cfi_offset w26, -64 +; CHECK-SD-NEXT: .cfi_offset w30, -72 +; CHECK-SD-NEXT: .cfi_offset b8, -80 +; CHECK-SD-NEXT: .cfi_offset b9, -88 +; CHECK-SD-NEXT: .cfi_offset b10, -96 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: // 
kill: def $s0 killed $s0 killed $q0 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: movi v9.2s, #241, lsl #24 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w8, #1895825407 // =0x70ffffff +; CHECK-SD-NEXT: fmov s10, w8 +; CHECK-SD-NEXT: mov x25, #-34359738368 // =0xfffffff800000000 +; CHECK-SD-NEXT: mov x26, #34359738367 // =0x7ffffffff +; CHECK-SD-NEXT: mov s8, v0.s[1] +; CHECK-SD-NEXT: fcmp s0, s9 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s0, s10 +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s0, s0 +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: csel x19, xzr, x8, vs +; CHECK-SD-NEXT: csel x20, xzr, x9, vs +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-SD-NEXT: csel x21, xzr, x8, vs +; CHECK-SD-NEXT: csel x22, xzr, x9, vs +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: fcmp s0, s9 +; CHECK-SD-NEXT: mov s8, v0.s[1] +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s0, s10 +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s0, s0 +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: csel x23, xzr, x8, vs +; CHECK-SD-NEXT: csel x24, xzr, x9, vs +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: mov x2, x21 +; CHECK-SD-NEXT: mov x3, x22 +; CHECK-SD-NEXT: mov x4, x23 +; CHECK-SD-NEXT: mov x5, x24 +; CHECK-SD-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload +; CHECK-SD-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: mov x0, x19 +; CHECK-SD-NEXT: mov x1, x20 +; CHECK-SD-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload +; CHECK-SD-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x6, xzr, x8, vs +; CHECK-SD-NEXT: csel x7, xzr, x9, vs +; CHECK-SD-NEXT: add sp, sp, #112 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v4f32_v4i100: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #128 +; CHECK-GI-NEXT: str d12, [sp, #16] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp d11, d10, [sp, #24] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #40] // 16-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #56] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x26, x25, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x24, x23, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 128 +; CHECK-GI-NEXT: 
.cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w23, -40 +; CHECK-GI-NEXT: .cfi_offset w24, -48 +; CHECK-GI-NEXT: .cfi_offset w25, -56 +; CHECK-GI-NEXT: .cfi_offset w26, -64 +; CHECK-GI-NEXT: .cfi_offset w30, -72 +; CHECK-GI-NEXT: .cfi_offset b8, -80 +; CHECK-GI-NEXT: .cfi_offset b9, -88 +; CHECK-GI-NEXT: .cfi_offset b10, -96 +; CHECK-GI-NEXT: .cfi_offset b11, -104 +; CHECK-GI-NEXT: .cfi_offset b12, -112 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: mov s9, v0.s[1] +; CHECK-GI-NEXT: mov s10, v0.s[2] +; CHECK-GI-NEXT: mov s8, v0.s[3] +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-GI-NEXT: bl __fixsfti +; CHECK-GI-NEXT: movi v11.2s, #241, lsl #24 +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w8, #1895825407 // =0x70ffffff +; CHECK-GI-NEXT: fmov s12, w8 +; CHECK-GI-NEXT: mov x25, #34359738368 // =0x800000000 +; CHECK-GI-NEXT: mov x26, #34359738367 // =0x7ffffffff +; CHECK-GI-NEXT: fcmp s0, s11 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x25, x1, lt +; CHECK-GI-NEXT: fcmp s0, s12 +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x26, x9, gt +; CHECK-GI-NEXT: fcmp s0, s0 +; CHECK-GI-NEXT: fmov s0, s9 +; CHECK-GI-NEXT: csel x19, xzr, x8, vs +; CHECK-GI-NEXT: csel x20, xzr, x9, vs +; CHECK-GI-NEXT: bl __fixsfti +; CHECK-GI-NEXT: fcmp s9, s11 +; CHECK-GI-NEXT: fmov s0, s10 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x25, x1, lt +; CHECK-GI-NEXT: fcmp s9, s12 +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x26, x9, gt +; CHECK-GI-NEXT: fcmp s9, s9 +; CHECK-GI-NEXT: csel x21, xzr, x8, vs +; CHECK-GI-NEXT: csel x22, xzr, x9, vs +; CHECK-GI-NEXT: bl __fixsfti +; CHECK-GI-NEXT: fcmp s10, s11 +; CHECK-GI-NEXT: fmov s0, s8 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x25, x1, lt +; CHECK-GI-NEXT: fcmp s10, s12 +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x26, x9, gt +; CHECK-GI-NEXT: fcmp s10, s10 +; CHECK-GI-NEXT: csel x23, xzr, x8, vs +; CHECK-GI-NEXT: csel x24, xzr, x9, vs +; CHECK-GI-NEXT: bl __fixsfti +; CHECK-GI-NEXT: fcmp s8, s11 +; CHECK-GI-NEXT: mov x2, x21 +; CHECK-GI-NEXT: mov x3, x22 +; CHECK-GI-NEXT: mov x4, x23 +; CHECK-GI-NEXT: mov x5, x24 +; CHECK-GI-NEXT: ldr x30, [sp, #56] // 8-byte Folded Reload +; CHECK-GI-NEXT: ldp x22, x21, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x25, x1, lt +; CHECK-GI-NEXT: fcmp s8, s12 +; CHECK-GI-NEXT: mov x0, x19 +; CHECK-GI-NEXT: mov x1, x20 +; CHECK-GI-NEXT: ldr d12, [sp, #16] // 8-byte Folded Reload +; CHECK-GI-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x26, x9, gt +; CHECK-GI-NEXT: fcmp s8, s8 +; CHECK-GI-NEXT: ldp x24, x23, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x26, x25, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #40] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x6, xzr, x8, vs +; CHECK-GI-NEXT: ldp d11, d10, [sp, #24] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x7, xzr, x9, vs +; CHECK-GI-NEXT: add sp, sp, #128 +; CHECK-GI-NEXT: ret %x = call <4 x i100> @llvm.fptosi.sat.v4f32.v4i100(<4 x float> %f) ret <4 x i100> %x } define <4 x i128> @test_signed_v4f32_v4i128(<4 x float> %f) { -; CHECK-LABEL: test_signed_v4f32_v4i128: -; CHECK: // %bb.0: -; 
CHECK-NEXT: sub sp, sp, #112 -; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill -; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill -; CHECK-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 112 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w23, -40 -; CHECK-NEXT: .cfi_offset w24, -48 -; CHECK-NEXT: .cfi_offset w25, -56 -; CHECK-NEXT: .cfi_offset w26, -64 -; CHECK-NEXT: .cfi_offset w30, -72 -; CHECK-NEXT: .cfi_offset b8, -80 -; CHECK-NEXT: .cfi_offset b9, -88 -; CHECK-NEXT: .cfi_offset b10, -96 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: movi v9.2s, #255, lsl #24 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w8, #2130706431 // =0x7effffff -; CHECK-NEXT: fmov s10, w8 -; CHECK-NEXT: mov x25, #-9223372036854775808 // =0x8000000000000000 -; CHECK-NEXT: mov x26, #9223372036854775807 // =0x7fffffffffffffff -; CHECK-NEXT: mov s8, v0.s[1] -; CHECK-NEXT: fcmp s0, s9 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s0, s10 -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s0, s0 -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: csel x19, xzr, x8, vs -; CHECK-NEXT: csel x20, xzr, x9, vs -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: csel x21, xzr, x8, vs -; CHECK-NEXT: csel x22, xzr, x9, vs -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: fcmp s0, s9 -; CHECK-NEXT: mov s8, v0.s[1] -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s0, s10 -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s0, s0 -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: csel x23, xzr, x8, vs -; CHECK-NEXT: csel x24, xzr, x9, vs -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: mov x2, x21 -; CHECK-NEXT: mov x3, x22 -; CHECK-NEXT: mov x4, x23 -; CHECK-NEXT: mov x5, x24 -; CHECK-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload -; CHECK-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: mov x0, x19 -; CHECK-NEXT: mov x1, x20 -; CHECK-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload -; CHECK-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload -; 
CHECK-NEXT: csel x6, xzr, x8, vs -; CHECK-NEXT: csel x7, xzr, x9, vs -; CHECK-NEXT: add sp, sp, #112 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v4f32_v4i128: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #112 +; CHECK-SD-NEXT: str d10, [sp, #16] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill +; CHECK-SD-NEXT: str x30, [sp, #40] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 112 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w22, -32 +; CHECK-SD-NEXT: .cfi_offset w23, -40 +; CHECK-SD-NEXT: .cfi_offset w24, -48 +; CHECK-SD-NEXT: .cfi_offset w25, -56 +; CHECK-SD-NEXT: .cfi_offset w26, -64 +; CHECK-SD-NEXT: .cfi_offset w30, -72 +; CHECK-SD-NEXT: .cfi_offset b8, -80 +; CHECK-SD-NEXT: .cfi_offset b9, -88 +; CHECK-SD-NEXT: .cfi_offset b10, -96 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: movi v9.2s, #255, lsl #24 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w8, #2130706431 // =0x7effffff +; CHECK-SD-NEXT: fmov s10, w8 +; CHECK-SD-NEXT: mov x25, #-9223372036854775808 // =0x8000000000000000 +; CHECK-SD-NEXT: mov x26, #9223372036854775807 // =0x7fffffffffffffff +; CHECK-SD-NEXT: mov s8, v0.s[1] +; CHECK-SD-NEXT: fcmp s0, s9 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s0, s10 +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s0, s0 +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: csel x19, xzr, x8, vs +; CHECK-SD-NEXT: csel x20, xzr, x9, vs +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-SD-NEXT: csel x21, xzr, x8, vs +; CHECK-SD-NEXT: csel x22, xzr, x9, vs +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: fcmp s0, s9 +; CHECK-SD-NEXT: mov s8, v0.s[1] +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s0, s10 +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s0, s0 +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: csel x23, xzr, x8, vs +; CHECK-SD-NEXT: csel x24, xzr, x9, vs +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: mov x2, x21 +; CHECK-SD-NEXT: mov x3, x22 +; CHECK-SD-NEXT: mov x4, x23 +; CHECK-SD-NEXT: mov x5, x24 +; CHECK-SD-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload +; CHECK-SD-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: mov x0, x19 +; 
CHECK-SD-NEXT: mov x1, x20 +; CHECK-SD-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload +; CHECK-SD-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x6, xzr, x8, vs +; CHECK-SD-NEXT: csel x7, xzr, x9, vs +; CHECK-SD-NEXT: add sp, sp, #112 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v4f32_v4i128: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #128 +; CHECK-GI-NEXT: str d12, [sp, #16] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp d11, d10, [sp, #24] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #40] // 16-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #56] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x26, x25, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x24, x23, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 128 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w23, -40 +; CHECK-GI-NEXT: .cfi_offset w24, -48 +; CHECK-GI-NEXT: .cfi_offset w25, -56 +; CHECK-GI-NEXT: .cfi_offset w26, -64 +; CHECK-GI-NEXT: .cfi_offset w30, -72 +; CHECK-GI-NEXT: .cfi_offset b8, -80 +; CHECK-GI-NEXT: .cfi_offset b9, -88 +; CHECK-GI-NEXT: .cfi_offset b10, -96 +; CHECK-GI-NEXT: .cfi_offset b11, -104 +; CHECK-GI-NEXT: .cfi_offset b12, -112 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: mov s9, v0.s[1] +; CHECK-GI-NEXT: mov s10, v0.s[2] +; CHECK-GI-NEXT: mov s8, v0.s[3] +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-GI-NEXT: bl __fixsfti +; CHECK-GI-NEXT: movi v11.2s, #255, lsl #24 +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w8, #2130706431 // =0x7effffff +; CHECK-GI-NEXT: fmov s12, w8 +; CHECK-GI-NEXT: mov x25, #-9223372036854775808 // =0x8000000000000000 +; CHECK-GI-NEXT: mov x26, #9223372036854775807 // =0x7fffffffffffffff +; CHECK-GI-NEXT: fcmp s0, s11 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x25, x1, lt +; CHECK-GI-NEXT: fcmp s0, s12 +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x26, x9, gt +; CHECK-GI-NEXT: fcmp s0, s0 +; CHECK-GI-NEXT: fmov s0, s9 +; CHECK-GI-NEXT: csel x19, xzr, x8, vs +; CHECK-GI-NEXT: csel x20, xzr, x9, vs +; CHECK-GI-NEXT: bl __fixsfti +; CHECK-GI-NEXT: fcmp s9, s11 +; CHECK-GI-NEXT: fmov s0, s10 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x25, x1, lt +; CHECK-GI-NEXT: fcmp s9, s12 +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x26, x9, gt +; CHECK-GI-NEXT: fcmp s9, s9 +; CHECK-GI-NEXT: csel x21, xzr, x8, vs +; CHECK-GI-NEXT: csel x22, xzr, x9, vs +; CHECK-GI-NEXT: bl __fixsfti +; CHECK-GI-NEXT: fcmp s10, s11 +; CHECK-GI-NEXT: fmov s0, s8 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x25, x1, lt +; CHECK-GI-NEXT: fcmp s10, s12 +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x26, x9, gt +; CHECK-GI-NEXT: fcmp s10, s10 +; CHECK-GI-NEXT: csel x23, xzr, x8, vs +; CHECK-GI-NEXT: csel x24, xzr, x9, vs +; CHECK-GI-NEXT: bl __fixsfti 
+; CHECK-GI-NEXT: fcmp s8, s11 +; CHECK-GI-NEXT: mov x2, x21 +; CHECK-GI-NEXT: mov x3, x22 +; CHECK-GI-NEXT: mov x4, x23 +; CHECK-GI-NEXT: mov x5, x24 +; CHECK-GI-NEXT: ldr x30, [sp, #56] // 8-byte Folded Reload +; CHECK-GI-NEXT: ldp x22, x21, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x25, x1, lt +; CHECK-GI-NEXT: fcmp s8, s12 +; CHECK-GI-NEXT: mov x0, x19 +; CHECK-GI-NEXT: mov x1, x20 +; CHECK-GI-NEXT: ldr d12, [sp, #16] // 8-byte Folded Reload +; CHECK-GI-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x26, x9, gt +; CHECK-GI-NEXT: fcmp s8, s8 +; CHECK-GI-NEXT: ldp x24, x23, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x26, x25, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #40] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x6, xzr, x8, vs +; CHECK-GI-NEXT: ldp d11, d10, [sp, #24] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x7, xzr, x9, vs +; CHECK-GI-NEXT: add sp, sp, #128 +; CHECK-GI-NEXT: ret %x = call <4 x i128> @llvm.fptosi.sat.v4f32.v4i128(<4 x float> %f) ret <4 x i128> %x } @@ -1272,152 +2425,246 @@ declare <2 x i100> @llvm.fptosi.sat.v2f64.v2i100(<2 x double>) declare <2 x i128> @llvm.fptosi.sat.v2f64.v2i128(<2 x double>) define <2 x i1> @test_signed_v2f64_v2i1(<2 x double> %f) { -; CHECK-LABEL: test_signed_v2f64_v2i1: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: fcvtzs w9, d0 -; CHECK-NEXT: fcvtzs w8, d1 -; CHECK-NEXT: ands w8, w8, w8, asr #31 -; CHECK-NEXT: csinv w8, w8, wzr, ge -; CHECK-NEXT: ands w9, w9, w9, asr #31 -; CHECK-NEXT: csinv w9, w9, wzr, ge -; CHECK-NEXT: fmov s0, w9 -; CHECK-NEXT: mov v0.s[1], w8 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f64_v2i1: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: fcvtzs w9, d0 +; CHECK-SD-NEXT: fcvtzs w8, d1 +; CHECK-SD-NEXT: ands w8, w8, w8, asr #31 +; CHECK-SD-NEXT: csinv w8, w8, wzr, ge +; CHECK-SD-NEXT: ands w9, w9, w9, asr #31 +; CHECK-SD-NEXT: csinv w9, w9, wzr, ge +; CHECK-SD-NEXT: fmov s0, w9 +; CHECK-SD-NEXT: mov v0.s[1], w8 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f64_v2i1: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-NEXT: movi v2.2d, #0xffffffffffffffff +; CHECK-GI-NEXT: cmlt v1.2d, v0.2d, #0 +; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-GI-NEXT: cmgt v1.2d, v0.2d, v2.2d +; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: ret %x = call <2 x i1> @llvm.fptosi.sat.v2f64.v2i1(<2 x double> %f) ret <2 x i1> %x } define <2 x i8> @test_signed_v2f64_v2i8(<2 x double> %f) { -; CHECK-LABEL: test_signed_v2f64_v2i8: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: fcvtzs w10, d0 -; CHECK-NEXT: mov w8, #127 // =0x7f -; CHECK-NEXT: mov w11, #-128 // =0xffffff80 -; CHECK-NEXT: fcvtzs w9, d1 -; CHECK-NEXT: cmp w9, #127 -; CHECK-NEXT: csel w9, w9, w8, lt -; CHECK-NEXT: cmn w9, #128 -; CHECK-NEXT: csel w9, w9, w11, gt -; CHECK-NEXT: cmp w10, #127 -; CHECK-NEXT: csel w8, w10, w8, lt -; CHECK-NEXT: cmn w8, #128 -; CHECK-NEXT: csel w8, w8, w11, gt -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w9 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f64_v2i8: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; 
CHECK-SD-NEXT: fcvtzs w10, d0 +; CHECK-SD-NEXT: mov w8, #127 // =0x7f +; CHECK-SD-NEXT: mov w11, #-128 // =0xffffff80 +; CHECK-SD-NEXT: fcvtzs w9, d1 +; CHECK-SD-NEXT: cmp w9, #127 +; CHECK-SD-NEXT: csel w9, w9, w8, lt +; CHECK-SD-NEXT: cmn w9, #128 +; CHECK-SD-NEXT: csel w9, w9, w11, gt +; CHECK-SD-NEXT: cmp w10, #127 +; CHECK-SD-NEXT: csel w8, w10, w8, lt +; CHECK-SD-NEXT: cmn w8, #128 +; CHECK-SD-NEXT: csel w8, w8, w11, gt +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w9 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f64_v2i8: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-NEXT: adrp x8, .LCPI47_1 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI47_1] +; CHECK-GI-NEXT: adrp x8, .LCPI47_0 +; CHECK-GI-NEXT: cmgt v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI47_0] +; CHECK-GI-NEXT: cmgt v2.2d, v0.2d, v1.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: ret %x = call <2 x i8> @llvm.fptosi.sat.v2f64.v2i8(<2 x double> %f) ret <2 x i8> %x } define <2 x i13> @test_signed_v2f64_v2i13(<2 x double> %f) { -; CHECK-LABEL: test_signed_v2f64_v2i13: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: fcvtzs w10, d0 -; CHECK-NEXT: mov w8, #4095 // =0xfff -; CHECK-NEXT: mov w11, #-4096 // =0xfffff000 -; CHECK-NEXT: fcvtzs w9, d1 -; CHECK-NEXT: cmp w9, #4095 -; CHECK-NEXT: csel w9, w9, w8, lt -; CHECK-NEXT: cmn w9, #1, lsl #12 // =4096 -; CHECK-NEXT: csel w9, w9, w11, gt -; CHECK-NEXT: cmp w10, #4095 -; CHECK-NEXT: csel w8, w10, w8, lt -; CHECK-NEXT: cmn w8, #1, lsl #12 // =4096 -; CHECK-NEXT: csel w8, w8, w11, gt -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w9 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f64_v2i13: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: fcvtzs w10, d0 +; CHECK-SD-NEXT: mov w8, #4095 // =0xfff +; CHECK-SD-NEXT: mov w11, #-4096 // =0xfffff000 +; CHECK-SD-NEXT: fcvtzs w9, d1 +; CHECK-SD-NEXT: cmp w9, #4095 +; CHECK-SD-NEXT: csel w9, w9, w8, lt +; CHECK-SD-NEXT: cmn w9, #1, lsl #12 // =4096 +; CHECK-SD-NEXT: csel w9, w9, w11, gt +; CHECK-SD-NEXT: cmp w10, #4095 +; CHECK-SD-NEXT: csel w8, w10, w8, lt +; CHECK-SD-NEXT: cmn w8, #1, lsl #12 // =4096 +; CHECK-SD-NEXT: csel w8, w8, w11, gt +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w9 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f64_v2i13: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-NEXT: adrp x8, .LCPI48_1 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI48_1] +; CHECK-GI-NEXT: adrp x8, .LCPI48_0 +; CHECK-GI-NEXT: cmgt v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI48_0] +; CHECK-GI-NEXT: cmgt v2.2d, v0.2d, v1.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: ret %x = call <2 x i13> @llvm.fptosi.sat.v2f64.v2i13(<2 x double> %f) ret <2 x i13> %x } define <2 x i16> @test_signed_v2f64_v2i16(<2 x double> %f) { -; CHECK-LABEL: test_signed_v2f64_v2i16: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: mov w8, #32767 // =0x7fff -; CHECK-NEXT: fcvtzs w10, d0 -; CHECK-NEXT: mov w11, #-32768 // =0xffff8000 -; CHECK-NEXT: fcvtzs w9, d1 -; CHECK-NEXT: cmp w9, w8 -; CHECK-NEXT: csel w9, w9, w8, lt 
-; CHECK-NEXT: cmn w9, #8, lsl #12 // =32768 -; CHECK-NEXT: csel w9, w9, w11, gt -; CHECK-NEXT: cmp w10, w8 -; CHECK-NEXT: csel w8, w10, w8, lt -; CHECK-NEXT: cmn w8, #8, lsl #12 // =32768 -; CHECK-NEXT: csel w8, w8, w11, gt -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w9 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f64_v2i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: mov w8, #32767 // =0x7fff +; CHECK-SD-NEXT: fcvtzs w10, d0 +; CHECK-SD-NEXT: mov w11, #-32768 // =0xffff8000 +; CHECK-SD-NEXT: fcvtzs w9, d1 +; CHECK-SD-NEXT: cmp w9, w8 +; CHECK-SD-NEXT: csel w9, w9, w8, lt +; CHECK-SD-NEXT: cmn w9, #8, lsl #12 // =32768 +; CHECK-SD-NEXT: csel w9, w9, w11, gt +; CHECK-SD-NEXT: cmp w10, w8 +; CHECK-SD-NEXT: csel w8, w10, w8, lt +; CHECK-SD-NEXT: cmn w8, #8, lsl #12 // =32768 +; CHECK-SD-NEXT: csel w8, w8, w11, gt +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w9 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f64_v2i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-NEXT: adrp x8, .LCPI49_1 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI49_1] +; CHECK-GI-NEXT: adrp x8, .LCPI49_0 +; CHECK-GI-NEXT: cmgt v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI49_0] +; CHECK-GI-NEXT: cmgt v2.2d, v0.2d, v1.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: ret %x = call <2 x i16> @llvm.fptosi.sat.v2f64.v2i16(<2 x double> %f) ret <2 x i16> %x } define <2 x i19> @test_signed_v2f64_v2i19(<2 x double> %f) { -; CHECK-LABEL: test_signed_v2f64_v2i19: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: mov w8, #262143 // =0x3ffff -; CHECK-NEXT: fcvtzs w10, d0 -; CHECK-NEXT: mov w11, #-262144 // =0xfffc0000 -; CHECK-NEXT: fcvtzs w9, d1 -; CHECK-NEXT: cmp w9, w8 -; CHECK-NEXT: csel w9, w9, w8, lt -; CHECK-NEXT: cmn w9, #64, lsl #12 // =262144 -; CHECK-NEXT: csel w9, w9, w11, gt -; CHECK-NEXT: cmp w10, w8 -; CHECK-NEXT: csel w8, w10, w8, lt -; CHECK-NEXT: cmn w8, #64, lsl #12 // =262144 -; CHECK-NEXT: csel w8, w8, w11, gt -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w9 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f64_v2i19: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: mov w8, #262143 // =0x3ffff +; CHECK-SD-NEXT: fcvtzs w10, d0 +; CHECK-SD-NEXT: mov w11, #-262144 // =0xfffc0000 +; CHECK-SD-NEXT: fcvtzs w9, d1 +; CHECK-SD-NEXT: cmp w9, w8 +; CHECK-SD-NEXT: csel w9, w9, w8, lt +; CHECK-SD-NEXT: cmn w9, #64, lsl #12 // =262144 +; CHECK-SD-NEXT: csel w9, w9, w11, gt +; CHECK-SD-NEXT: cmp w10, w8 +; CHECK-SD-NEXT: csel w8, w10, w8, lt +; CHECK-SD-NEXT: cmn w8, #64, lsl #12 // =262144 +; CHECK-SD-NEXT: csel w8, w8, w11, gt +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w9 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f64_v2i19: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-NEXT: adrp x8, .LCPI50_1 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI50_1] +; CHECK-GI-NEXT: adrp x8, .LCPI50_0 +; CHECK-GI-NEXT: cmgt v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI50_0] +; CHECK-GI-NEXT: cmgt v2.2d, v0.2d, v1.2d +; CHECK-GI-NEXT: bif v0.16b, 
v1.16b, v2.16b +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: ret %x = call <2 x i19> @llvm.fptosi.sat.v2f64.v2i19(<2 x double> %f) ret <2 x i19> %x } define <2 x i32> @test_signed_v2f64_v2i32_duplicate(<2 x double> %f) { -; CHECK-LABEL: test_signed_v2f64_v2i32_duplicate: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: fcvtzs w8, d0 -; CHECK-NEXT: fcvtzs w9, d1 -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w9 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f64_v2i32_duplicate: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: fcvtzs w8, d0 +; CHECK-SD-NEXT: fcvtzs w9, d1 +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w9 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f64_v2i32_duplicate: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-NEXT: adrp x8, .LCPI51_1 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI51_1] +; CHECK-GI-NEXT: adrp x8, .LCPI51_0 +; CHECK-GI-NEXT: cmgt v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI51_0] +; CHECK-GI-NEXT: cmgt v2.2d, v0.2d, v1.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: ret %x = call <2 x i32> @llvm.fptosi.sat.v2f64.v2i32(<2 x double> %f) ret <2 x i32> %x } define <2 x i50> @test_signed_v2f64_v2i50(<2 x double> %f) { -; CHECK-LABEL: test_signed_v2f64_v2i50: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff -; CHECK-NEXT: fcvtzs x10, d0 -; CHECK-NEXT: mov x11, #-562949953421312 // =0xfffe000000000000 -; CHECK-NEXT: fcvtzs x9, d1 -; CHECK-NEXT: cmp x9, x8 -; CHECK-NEXT: csel x9, x9, x8, lt -; CHECK-NEXT: cmp x9, x11 -; CHECK-NEXT: csel x9, x9, x11, gt -; CHECK-NEXT: cmp x10, x8 -; CHECK-NEXT: csel x8, x10, x8, lt -; CHECK-NEXT: cmp x8, x11 -; CHECK-NEXT: csel x8, x8, x11, gt -; CHECK-NEXT: fmov d0, x8 -; CHECK-NEXT: mov v0.d[1], x9 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f64_v2i50: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff +; CHECK-SD-NEXT: fcvtzs x10, d0 +; CHECK-SD-NEXT: mov x11, #-562949953421312 // =0xfffe000000000000 +; CHECK-SD-NEXT: fcvtzs x9, d1 +; CHECK-SD-NEXT: cmp x9, x8 +; CHECK-SD-NEXT: csel x9, x9, x8, lt +; CHECK-SD-NEXT: cmp x9, x11 +; CHECK-SD-NEXT: csel x9, x9, x11, gt +; CHECK-SD-NEXT: cmp x10, x8 +; CHECK-SD-NEXT: csel x8, x10, x8, lt +; CHECK-SD-NEXT: cmp x8, x11 +; CHECK-SD-NEXT: csel x8, x8, x11, gt +; CHECK-SD-NEXT: fmov d0, x8 +; CHECK-SD-NEXT: mov v0.d[1], x9 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f64_v2i50: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-NEXT: adrp x8, .LCPI52_1 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI52_1] +; CHECK-GI-NEXT: adrp x8, .LCPI52_0 +; CHECK-GI-NEXT: cmgt v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI52_0] +; CHECK-GI-NEXT: cmgt v2.2d, v0.2d, v1.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: ret %x = call <2 x i50> @llvm.fptosi.sat.v2f64.v2i50(<2 x double> %f) ret <2 x i50> %x } @@ -1432,125 +2679,241 @@ define <2 x i64> @test_signed_v2f64_v2i64(<2 x double> %f) { } define <2 x i100> @test_signed_v2f64_v2i100(<2 x double> %f) { -; CHECK-LABEL: test_signed_v2f64_v2i100: -; CHECK: // %bb.0: -; CHECK-NEXT: 
sub sp, sp, #80 -; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill -; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 80 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w30, -40 -; CHECK-NEXT: .cfi_offset b8, -48 -; CHECK-NEXT: .cfi_offset b9, -56 -; CHECK-NEXT: .cfi_offset b10, -64 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: bl __fixdfti -; CHECK-NEXT: mov x8, #-4170333254945079296 // =0xc620000000000000 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov x21, #-34359738368 // =0xfffffff800000000 -; CHECK-NEXT: fmov d9, x8 -; CHECK-NEXT: mov x8, #5053038781909696511 // =0x461fffffffffffff -; CHECK-NEXT: mov x22, #34359738367 // =0x7ffffffff -; CHECK-NEXT: fmov d10, x8 -; CHECK-NEXT: mov d8, v0.d[1] -; CHECK-NEXT: fcmp d0, d9 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x21, x1, lt -; CHECK-NEXT: fcmp d0, d10 -; CHECK-NEXT: csel x9, x22, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp d0, d0 -; CHECK-NEXT: fmov d0, d8 -; CHECK-NEXT: csel x19, xzr, x8, vs -; CHECK-NEXT: csel x20, xzr, x9, vs -; CHECK-NEXT: bl __fixdfti -; CHECK-NEXT: fcmp d8, d9 -; CHECK-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x21, x1, lt -; CHECK-NEXT: fcmp d8, d10 -; CHECK-NEXT: mov x0, x19 -; CHECK-NEXT: mov x1, x20 -; CHECK-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload -; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: csel x9, x22, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp d8, d8 -; CHECK-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload -; CHECK-NEXT: csel x2, xzr, x8, vs -; CHECK-NEXT: csel x3, xzr, x9, vs -; CHECK-NEXT: add sp, sp, #80 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f64_v2i100: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #80 +; CHECK-SD-NEXT: str d10, [sp, #16] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill +; CHECK-SD-NEXT: str x30, [sp, #40] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 80 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w22, -32 +; CHECK-SD-NEXT: .cfi_offset w30, -40 +; CHECK-SD-NEXT: .cfi_offset b8, -48 +; CHECK-SD-NEXT: .cfi_offset b9, -56 +; CHECK-SD-NEXT: .cfi_offset b10, -64 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: bl __fixdfti +; CHECK-SD-NEXT: mov x8, #-4170333254945079296 // =0xc620000000000000 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov x21, #-34359738368 // =0xfffffff800000000 +; CHECK-SD-NEXT: fmov d9, x8 +; CHECK-SD-NEXT: mov x8, #5053038781909696511 // =0x461fffffffffffff +; CHECK-SD-NEXT: mov x22, #34359738367 // =0x7ffffffff +; CHECK-SD-NEXT: fmov d10, x8 +; CHECK-SD-NEXT: mov d8, v0.d[1] +; 
CHECK-SD-NEXT: fcmp d0, d9 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x21, x1, lt +; CHECK-SD-NEXT: fcmp d0, d10 +; CHECK-SD-NEXT: csel x9, x22, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp d0, d0 +; CHECK-SD-NEXT: fmov d0, d8 +; CHECK-SD-NEXT: csel x19, xzr, x8, vs +; CHECK-SD-NEXT: csel x20, xzr, x9, vs +; CHECK-SD-NEXT: bl __fixdfti +; CHECK-SD-NEXT: fcmp d8, d9 +; CHECK-SD-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x21, x1, lt +; CHECK-SD-NEXT: fcmp d8, d10 +; CHECK-SD-NEXT: mov x0, x19 +; CHECK-SD-NEXT: mov x1, x20 +; CHECK-SD-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload +; CHECK-SD-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x9, x22, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp d8, d8 +; CHECK-SD-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x2, xzr, x8, vs +; CHECK-SD-NEXT: csel x3, xzr, x9, vs +; CHECK-SD-NEXT: add sp, sp, #80 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f64_v2i100: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #80 +; CHECK-GI-NEXT: str d10, [sp, #16] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #40] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 80 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w30, -40 +; CHECK-GI-NEXT: .cfi_offset b8, -48 +; CHECK-GI-NEXT: .cfi_offset b9, -56 +; CHECK-GI-NEXT: .cfi_offset b10, -64 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: mov d8, v0.d[1] +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-NEXT: bl __fixdfti +; CHECK-GI-NEXT: mov x8, #-4170333254945079296 // =0xc620000000000000 +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov x21, #34359738368 // =0x800000000 +; CHECK-GI-NEXT: fmov d9, x8 +; CHECK-GI-NEXT: mov x8, #5053038781909696511 // =0x461fffffffffffff +; CHECK-GI-NEXT: mov x22, #34359738367 // =0x7ffffffff +; CHECK-GI-NEXT: fmov d10, x8 +; CHECK-GI-NEXT: fcmp d0, d9 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x21, x1, lt +; CHECK-GI-NEXT: fcmp d0, d10 +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x22, x9, gt +; CHECK-GI-NEXT: fcmp d0, d0 +; CHECK-GI-NEXT: fmov d0, d8 +; CHECK-GI-NEXT: csel x19, xzr, x8, vs +; CHECK-GI-NEXT: csel x20, xzr, x9, vs +; CHECK-GI-NEXT: bl __fixdfti +; CHECK-GI-NEXT: fcmp d8, d9 +; CHECK-GI-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x21, x1, lt +; CHECK-GI-NEXT: fcmp d8, d10 +; CHECK-GI-NEXT: mov x0, x19 +; CHECK-GI-NEXT: mov x1, x20 +; CHECK-GI-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload +; CHECK-GI-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x22, x9, gt +; CHECK-GI-NEXT: fcmp d8, d8 +; CHECK-GI-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x2, xzr, x8, vs +; CHECK-GI-NEXT: csel x3, 
xzr, x9, vs +; CHECK-GI-NEXT: add sp, sp, #80 +; CHECK-GI-NEXT: ret %x = call <2 x i100> @llvm.fptosi.sat.v2f64.v2i100(<2 x double> %f) ret <2 x i100> %x } define <2 x i128> @test_signed_v2f64_v2i128(<2 x double> %f) { -; CHECK-LABEL: test_signed_v2f64_v2i128: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #80 -; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill -; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 80 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w30, -40 -; CHECK-NEXT: .cfi_offset b8, -48 -; CHECK-NEXT: .cfi_offset b9, -56 -; CHECK-NEXT: .cfi_offset b10, -64 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: bl __fixdfti -; CHECK-NEXT: mov x8, #-4044232465378705408 // =0xc7e0000000000000 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov x21, #-9223372036854775808 // =0x8000000000000000 -; CHECK-NEXT: fmov d9, x8 -; CHECK-NEXT: mov x8, #5179139571476070399 // =0x47dfffffffffffff -; CHECK-NEXT: mov x22, #9223372036854775807 // =0x7fffffffffffffff -; CHECK-NEXT: fmov d10, x8 -; CHECK-NEXT: mov d8, v0.d[1] -; CHECK-NEXT: fcmp d0, d9 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x21, x1, lt -; CHECK-NEXT: fcmp d0, d10 -; CHECK-NEXT: csel x9, x22, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp d0, d0 -; CHECK-NEXT: fmov d0, d8 -; CHECK-NEXT: csel x19, xzr, x8, vs -; CHECK-NEXT: csel x20, xzr, x9, vs -; CHECK-NEXT: bl __fixdfti -; CHECK-NEXT: fcmp d8, d9 -; CHECK-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x21, x1, lt -; CHECK-NEXT: fcmp d8, d10 -; CHECK-NEXT: mov x0, x19 -; CHECK-NEXT: mov x1, x20 -; CHECK-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload -; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: csel x9, x22, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp d8, d8 -; CHECK-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload -; CHECK-NEXT: csel x2, xzr, x8, vs -; CHECK-NEXT: csel x3, xzr, x9, vs -; CHECK-NEXT: add sp, sp, #80 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v2f64_v2i128: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #80 +; CHECK-SD-NEXT: str d10, [sp, #16] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill +; CHECK-SD-NEXT: str x30, [sp, #40] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 80 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w22, -32 +; CHECK-SD-NEXT: .cfi_offset w30, -40 +; CHECK-SD-NEXT: .cfi_offset b8, -48 +; CHECK-SD-NEXT: .cfi_offset b9, -56 +; CHECK-SD-NEXT: .cfi_offset b10, -64 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: bl __fixdfti +; CHECK-SD-NEXT: mov x8, #-4044232465378705408 // =0xc7e0000000000000 +; CHECK-SD-NEXT: ldr q0, 
[sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov x21, #-9223372036854775808 // =0x8000000000000000 +; CHECK-SD-NEXT: fmov d9, x8 +; CHECK-SD-NEXT: mov x8, #5179139571476070399 // =0x47dfffffffffffff +; CHECK-SD-NEXT: mov x22, #9223372036854775807 // =0x7fffffffffffffff +; CHECK-SD-NEXT: fmov d10, x8 +; CHECK-SD-NEXT: mov d8, v0.d[1] +; CHECK-SD-NEXT: fcmp d0, d9 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x21, x1, lt +; CHECK-SD-NEXT: fcmp d0, d10 +; CHECK-SD-NEXT: csel x9, x22, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp d0, d0 +; CHECK-SD-NEXT: fmov d0, d8 +; CHECK-SD-NEXT: csel x19, xzr, x8, vs +; CHECK-SD-NEXT: csel x20, xzr, x9, vs +; CHECK-SD-NEXT: bl __fixdfti +; CHECK-SD-NEXT: fcmp d8, d9 +; CHECK-SD-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x21, x1, lt +; CHECK-SD-NEXT: fcmp d8, d10 +; CHECK-SD-NEXT: mov x0, x19 +; CHECK-SD-NEXT: mov x1, x20 +; CHECK-SD-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload +; CHECK-SD-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x9, x22, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp d8, d8 +; CHECK-SD-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x2, xzr, x8, vs +; CHECK-SD-NEXT: csel x3, xzr, x9, vs +; CHECK-SD-NEXT: add sp, sp, #80 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v2f64_v2i128: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #80 +; CHECK-GI-NEXT: str d10, [sp, #16] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #40] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 80 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w30, -40 +; CHECK-GI-NEXT: .cfi_offset b8, -48 +; CHECK-GI-NEXT: .cfi_offset b9, -56 +; CHECK-GI-NEXT: .cfi_offset b10, -64 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: mov d8, v0.d[1] +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-NEXT: bl __fixdfti +; CHECK-GI-NEXT: mov x8, #-4044232465378705408 // =0xc7e0000000000000 +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov x21, #-9223372036854775808 // =0x8000000000000000 +; CHECK-GI-NEXT: fmov d9, x8 +; CHECK-GI-NEXT: mov x8, #5179139571476070399 // =0x47dfffffffffffff +; CHECK-GI-NEXT: mov x22, #9223372036854775807 // =0x7fffffffffffffff +; CHECK-GI-NEXT: fmov d10, x8 +; CHECK-GI-NEXT: fcmp d0, d9 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x21, x1, lt +; CHECK-GI-NEXT: fcmp d0, d10 +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x22, x9, gt +; CHECK-GI-NEXT: fcmp d0, d0 +; CHECK-GI-NEXT: fmov d0, d8 +; CHECK-GI-NEXT: csel x19, xzr, x8, vs +; CHECK-GI-NEXT: csel x20, xzr, x9, vs +; CHECK-GI-NEXT: bl __fixdfti +; CHECK-GI-NEXT: fcmp d8, d9 +; CHECK-GI-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, x21, x1, lt +; CHECK-GI-NEXT: fcmp d8, d10 +; CHECK-GI-NEXT: mov x0, x19 +; CHECK-GI-NEXT: mov x1, x20 +; CHECK-GI-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload +; 
CHECK-GI-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: csinv x8, x8, xzr, le +; CHECK-GI-NEXT: csel x9, x22, x9, gt +; CHECK-GI-NEXT: fcmp d8, d8 +; CHECK-GI-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x2, xzr, x8, vs +; CHECK-GI-NEXT: csel x3, xzr, x9, vs +; CHECK-GI-NEXT: add sp, sp, #80 +; CHECK-GI-NEXT: ret %x = call <2 x i128> @llvm.fptosi.sat.v2f64.v2i128(<2 x double> %f) ret <2 x i128> %x } @@ -1570,89 +2933,165 @@ declare <4 x i100> @llvm.fptosi.sat.v4f16.v4i100(<4 x half>) declare <4 x i128> @llvm.fptosi.sat.v4f16.v4i128(<4 x half>) define <4 x i1> @test_signed_v4f16_v4i1(<4 x half> %f) { -; CHECK-CVT-LABEL: test_signed_v4f16_v4i1: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: movi v1.2d, #0000000000000000 -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: smin v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: movi v1.2d, #0xffffffffffffffff -; CHECK-CVT-NEXT: smax v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_v4f16_v4i1: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: movi v1.2d, #0000000000000000 -; CHECK-FP16-NEXT: fcvtzs v0.4h, v0.4h -; CHECK-FP16-NEXT: movi v2.2d, #0xffffffffffffffff -; CHECK-FP16-NEXT: smin v0.4h, v0.4h, v1.4h -; CHECK-FP16-NEXT: smax v0.4h, v0.4h, v2.4h -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_v4f16_v4i1: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h +; CHECK-SD-CVT-NEXT: movi v1.2d, #0000000000000000 +; CHECK-SD-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-CVT-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-SD-CVT-NEXT: movi v1.2d, #0xffffffffffffffff +; CHECK-SD-CVT-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-SD-CVT-NEXT: xtn v0.4h, v0.4s +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_v4f16_v4i1: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: movi v1.2d, #0000000000000000 +; CHECK-SD-FP16-NEXT: fcvtzs v0.4h, v0.4h +; CHECK-SD-FP16-NEXT: movi v2.2d, #0xffffffffffffffff +; CHECK-SD-FP16-NEXT: smin v0.4h, v0.4h, v1.4h +; CHECK-SD-FP16-NEXT: smax v0.4h, v0.4h, v2.4h +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v4f16_v4i1: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvtl v0.4s, v0.4h +; CHECK-GI-CVT-NEXT: movi v1.2d, #0000000000000000 +; CHECK-GI-CVT-NEXT: movi v2.2d, #0xffffffffffffffff +; CHECK-GI-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-CVT-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-GI-CVT-NEXT: smax v0.4s, v0.4s, v2.4s +; CHECK-GI-CVT-NEXT: xtn v0.4h, v0.4s +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v4f16_v4i1: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: movi v1.2d, #0000000000000000 +; CHECK-GI-FP16-NEXT: fcvtzs v0.4h, v0.4h +; CHECK-GI-FP16-NEXT: movi d2, #0xffffffffffffffff +; CHECK-GI-FP16-NEXT: smin v0.4h, v0.4h, v1.4h +; CHECK-GI-FP16-NEXT: smax v0.4h, v0.4h, v2.4h +; CHECK-GI-FP16-NEXT: ret %x = call <4 x i1> @llvm.fptosi.sat.v4f16.v4i1(<4 x half> %f) ret <4 x i1> %x } define <4 x i8> @test_signed_v4f16_v4i8(<4 x half> %f) { -; CHECK-CVT-LABEL: test_signed_v4f16_v4i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: movi v1.4s, #127 -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: smin v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: mvni v1.4s, #127 -; CHECK-CVT-NEXT: smax v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_v4f16_v4i8: -; 
CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: movi v1.4h, #127 -; CHECK-FP16-NEXT: fcvtzs v0.4h, v0.4h -; CHECK-FP16-NEXT: smin v0.4h, v0.4h, v1.4h -; CHECK-FP16-NEXT: mvni v1.4h, #127 -; CHECK-FP16-NEXT: smax v0.4h, v0.4h, v1.4h -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_v4f16_v4i8: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h +; CHECK-SD-CVT-NEXT: movi v1.4s, #127 +; CHECK-SD-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-CVT-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-SD-CVT-NEXT: mvni v1.4s, #127 +; CHECK-SD-CVT-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-SD-CVT-NEXT: xtn v0.4h, v0.4s +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_v4f16_v4i8: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: movi v1.4h, #127 +; CHECK-SD-FP16-NEXT: fcvtzs v0.4h, v0.4h +; CHECK-SD-FP16-NEXT: smin v0.4h, v0.4h, v1.4h +; CHECK-SD-FP16-NEXT: mvni v1.4h, #127 +; CHECK-SD-FP16-NEXT: smax v0.4h, v0.4h, v1.4h +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v4f16_v4i8: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvtl v0.4s, v0.4h +; CHECK-GI-CVT-NEXT: movi v1.4s, #127 +; CHECK-GI-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-CVT-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-GI-CVT-NEXT: mvni v1.4s, #127 +; CHECK-GI-CVT-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-GI-CVT-NEXT: xtn v0.4h, v0.4s +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v4f16_v4i8: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: movi v1.4h, #127 +; CHECK-GI-FP16-NEXT: fcvtzs v0.4h, v0.4h +; CHECK-GI-FP16-NEXT: mvni v2.4h, #127 +; CHECK-GI-FP16-NEXT: smin v0.4h, v0.4h, v1.4h +; CHECK-GI-FP16-NEXT: smax v0.4h, v0.4h, v2.4h +; CHECK-GI-FP16-NEXT: ret %x = call <4 x i8> @llvm.fptosi.sat.v4f16.v4i8(<4 x half> %f) ret <4 x i8> %x } define <4 x i13> @test_signed_v4f16_v4i13(<4 x half> %f) { -; CHECK-CVT-LABEL: test_signed_v4f16_v4i13: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: movi v1.4s, #15, msl #8 -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: smin v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: mvni v1.4s, #15, msl #8 -; CHECK-CVT-NEXT: smax v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_v4f16_v4i13: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs v0.4h, v0.4h -; CHECK-FP16-NEXT: mvni v1.4h, #240, lsl #8 -; CHECK-FP16-NEXT: movi v2.4h, #240, lsl #8 -; CHECK-FP16-NEXT: smin v0.4h, v0.4h, v1.4h -; CHECK-FP16-NEXT: smax v0.4h, v0.4h, v2.4h -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_v4f16_v4i13: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h +; CHECK-SD-CVT-NEXT: movi v1.4s, #15, msl #8 +; CHECK-SD-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-CVT-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-SD-CVT-NEXT: mvni v1.4s, #15, msl #8 +; CHECK-SD-CVT-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-SD-CVT-NEXT: xtn v0.4h, v0.4s +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_v4f16_v4i13: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs v0.4h, v0.4h +; CHECK-SD-FP16-NEXT: mvni v1.4h, #240, lsl #8 +; CHECK-SD-FP16-NEXT: movi v2.4h, #240, lsl #8 +; CHECK-SD-FP16-NEXT: smin v0.4h, v0.4h, v1.4h +; CHECK-SD-FP16-NEXT: smax v0.4h, v0.4h, v2.4h +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v4f16_v4i13: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvtl v0.4s, v0.4h +; CHECK-GI-CVT-NEXT: movi v1.4s, #15, msl #8 +; CHECK-GI-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-CVT-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-GI-CVT-NEXT: 
mvni v1.4s, #15, msl #8 +; CHECK-GI-CVT-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-GI-CVT-NEXT: xtn v0.4h, v0.4s +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v4f16_v4i13: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs v0.4h, v0.4h +; CHECK-GI-FP16-NEXT: mvni v1.4h, #240, lsl #8 +; CHECK-GI-FP16-NEXT: movi v2.4h, #240, lsl #8 +; CHECK-GI-FP16-NEXT: smin v0.4h, v0.4h, v1.4h +; CHECK-GI-FP16-NEXT: smax v0.4h, v0.4h, v2.4h +; CHECK-GI-FP16-NEXT: ret %x = call <4 x i13> @llvm.fptosi.sat.v4f16.v4i13(<4 x half> %f) ret <4 x i13> %x } define <4 x i16> @test_signed_v4f16_v4i16(<4 x half> %f) { -; CHECK-CVT-LABEL: test_signed_v4f16_v4i16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: sqxtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_v4f16_v4i16: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs v0.4h, v0.4h -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_v4f16_v4i16: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h +; CHECK-SD-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-CVT-NEXT: sqxtn v0.4h, v0.4s +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_v4f16_v4i16: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs v0.4h, v0.4h +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v4f16_v4i16: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvtl v0.4s, v0.4h +; CHECK-GI-CVT-NEXT: movi v1.4s, #127, msl #8 +; CHECK-GI-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-CVT-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-GI-CVT-NEXT: mvni v1.4s, #127, msl #8 +; CHECK-GI-CVT-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-GI-CVT-NEXT: xtn v0.4h, v0.4s +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v4f16_v4i16: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs v0.4h, v0.4h +; CHECK-GI-FP16-NEXT: ret %x = call <4 x i16> @llvm.fptosi.sat.v4f16.v4i16(<4 x half> %f) ret <4 x i16> %x } @@ -1682,317 +3121,478 @@ define <4 x i32> @test_signed_v4f16_v4i32_duplicate(<4 x half> %f) { } define <4 x i50> @test_signed_v4f16_v4i50(<4 x half> %f) { -; CHECK-CVT-LABEL: test_signed_v4f16_v4i50: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-CVT-NEXT: mov h1, v0.h[1] -; CHECK-CVT-NEXT: fcvt s2, h0 -; CHECK-CVT-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff -; CHECK-CVT-NEXT: mov h3, v0.h[2] -; CHECK-CVT-NEXT: mov h0, v0.h[3] -; CHECK-CVT-NEXT: mov x11, #-562949953421312 // =0xfffe000000000000 -; CHECK-CVT-NEXT: fcvt s1, h1 -; CHECK-CVT-NEXT: fcvtzs x9, s2 -; CHECK-CVT-NEXT: fcvt s2, h3 -; CHECK-CVT-NEXT: fcvt s0, h0 -; CHECK-CVT-NEXT: fcvtzs x10, s1 -; CHECK-CVT-NEXT: cmp x9, x8 -; CHECK-CVT-NEXT: csel x9, x9, x8, lt -; CHECK-CVT-NEXT: fcvtzs x12, s2 -; CHECK-CVT-NEXT: cmp x9, x11 -; CHECK-CVT-NEXT: csel x0, x9, x11, gt -; CHECK-CVT-NEXT: cmp x10, x8 -; CHECK-CVT-NEXT: csel x9, x10, x8, lt -; CHECK-CVT-NEXT: fcvtzs x10, s0 -; CHECK-CVT-NEXT: cmp x9, x11 -; CHECK-CVT-NEXT: csel x1, x9, x11, gt -; CHECK-CVT-NEXT: cmp x12, x8 -; CHECK-CVT-NEXT: csel x9, x12, x8, lt -; CHECK-CVT-NEXT: cmp x9, x11 -; CHECK-CVT-NEXT: csel x2, x9, x11, gt -; CHECK-CVT-NEXT: cmp x10, x8 -; CHECK-CVT-NEXT: csel x8, x10, x8, lt -; CHECK-CVT-NEXT: cmp x8, x11 -; CHECK-CVT-NEXT: csel x3, x8, x11, gt -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_v4f16_v4i50: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-FP16-NEXT: mov h1, v0.h[1] -; CHECK-FP16-NEXT: fcvtzs x9, h0 
-; CHECK-FP16-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff -; CHECK-FP16-NEXT: mov h2, v0.h[2] -; CHECK-FP16-NEXT: mov x11, #-562949953421312 // =0xfffe000000000000 -; CHECK-FP16-NEXT: mov h0, v0.h[3] -; CHECK-FP16-NEXT: fcvtzs x10, h1 -; CHECK-FP16-NEXT: cmp x9, x8 -; CHECK-FP16-NEXT: csel x9, x9, x8, lt -; CHECK-FP16-NEXT: fcvtzs x12, h2 -; CHECK-FP16-NEXT: cmp x9, x11 -; CHECK-FP16-NEXT: csel x0, x9, x11, gt -; CHECK-FP16-NEXT: cmp x10, x8 -; CHECK-FP16-NEXT: csel x9, x10, x8, lt -; CHECK-FP16-NEXT: fcvtzs x10, h0 -; CHECK-FP16-NEXT: cmp x9, x11 -; CHECK-FP16-NEXT: csel x1, x9, x11, gt -; CHECK-FP16-NEXT: cmp x12, x8 -; CHECK-FP16-NEXT: csel x9, x12, x8, lt -; CHECK-FP16-NEXT: cmp x9, x11 -; CHECK-FP16-NEXT: csel x2, x9, x11, gt -; CHECK-FP16-NEXT: cmp x10, x8 -; CHECK-FP16-NEXT: csel x8, x10, x8, lt -; CHECK-FP16-NEXT: cmp x8, x11 -; CHECK-FP16-NEXT: csel x3, x8, x11, gt -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_v4f16_v4i50: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-CVT-NEXT: mov h1, v0.h[1] +; CHECK-SD-CVT-NEXT: fcvt s2, h0 +; CHECK-SD-CVT-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff +; CHECK-SD-CVT-NEXT: mov h3, v0.h[2] +; CHECK-SD-CVT-NEXT: mov h0, v0.h[3] +; CHECK-SD-CVT-NEXT: mov x11, #-562949953421312 // =0xfffe000000000000 +; CHECK-SD-CVT-NEXT: fcvt s1, h1 +; CHECK-SD-CVT-NEXT: fcvtzs x9, s2 +; CHECK-SD-CVT-NEXT: fcvt s2, h3 +; CHECK-SD-CVT-NEXT: fcvt s0, h0 +; CHECK-SD-CVT-NEXT: fcvtzs x10, s1 +; CHECK-SD-CVT-NEXT: cmp x9, x8 +; CHECK-SD-CVT-NEXT: csel x9, x9, x8, lt +; CHECK-SD-CVT-NEXT: fcvtzs x12, s2 +; CHECK-SD-CVT-NEXT: cmp x9, x11 +; CHECK-SD-CVT-NEXT: csel x0, x9, x11, gt +; CHECK-SD-CVT-NEXT: cmp x10, x8 +; CHECK-SD-CVT-NEXT: csel x9, x10, x8, lt +; CHECK-SD-CVT-NEXT: fcvtzs x10, s0 +; CHECK-SD-CVT-NEXT: cmp x9, x11 +; CHECK-SD-CVT-NEXT: csel x1, x9, x11, gt +; CHECK-SD-CVT-NEXT: cmp x12, x8 +; CHECK-SD-CVT-NEXT: csel x9, x12, x8, lt +; CHECK-SD-CVT-NEXT: cmp x9, x11 +; CHECK-SD-CVT-NEXT: csel x2, x9, x11, gt +; CHECK-SD-CVT-NEXT: cmp x10, x8 +; CHECK-SD-CVT-NEXT: csel x8, x10, x8, lt +; CHECK-SD-CVT-NEXT: cmp x8, x11 +; CHECK-SD-CVT-NEXT: csel x3, x8, x11, gt +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_v4f16_v4i50: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-FP16-NEXT: mov h1, v0.h[1] +; CHECK-SD-FP16-NEXT: fcvtzs x9, h0 +; CHECK-SD-FP16-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff +; CHECK-SD-FP16-NEXT: mov h2, v0.h[2] +; CHECK-SD-FP16-NEXT: mov x11, #-562949953421312 // =0xfffe000000000000 +; CHECK-SD-FP16-NEXT: mov h0, v0.h[3] +; CHECK-SD-FP16-NEXT: fcvtzs x10, h1 +; CHECK-SD-FP16-NEXT: cmp x9, x8 +; CHECK-SD-FP16-NEXT: csel x9, x9, x8, lt +; CHECK-SD-FP16-NEXT: fcvtzs x12, h2 +; CHECK-SD-FP16-NEXT: cmp x9, x11 +; CHECK-SD-FP16-NEXT: csel x0, x9, x11, gt +; CHECK-SD-FP16-NEXT: cmp x10, x8 +; CHECK-SD-FP16-NEXT: csel x9, x10, x8, lt +; CHECK-SD-FP16-NEXT: fcvtzs x10, h0 +; CHECK-SD-FP16-NEXT: cmp x9, x11 +; CHECK-SD-FP16-NEXT: csel x1, x9, x11, gt +; CHECK-SD-FP16-NEXT: cmp x12, x8 +; CHECK-SD-FP16-NEXT: csel x9, x12, x8, lt +; CHECK-SD-FP16-NEXT: cmp x9, x11 +; CHECK-SD-FP16-NEXT: csel x2, x9, x11, gt +; CHECK-SD-FP16-NEXT: cmp x10, x8 +; CHECK-SD-FP16-NEXT: csel x8, x10, x8, lt +; CHECK-SD-FP16-NEXT: cmp x8, x11 +; CHECK-SD-FP16-NEXT: csel x3, x8, x11, gt +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v4f16_v4i50: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: // kill: def $d0 
killed $d0 def $q0 +; CHECK-GI-CVT-NEXT: mov h1, v0.h[1] +; CHECK-GI-CVT-NEXT: fcvt s2, h0 +; CHECK-GI-CVT-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff +; CHECK-GI-CVT-NEXT: mov h3, v0.h[2] +; CHECK-GI-CVT-NEXT: mov h0, v0.h[3] +; CHECK-GI-CVT-NEXT: mov x11, #-562949953421312 // =0xfffe000000000000 +; CHECK-GI-CVT-NEXT: fcvt s1, h1 +; CHECK-GI-CVT-NEXT: fcvtzs x9, s2 +; CHECK-GI-CVT-NEXT: fcvt s2, h3 +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: fcvtzs x10, s1 +; CHECK-GI-CVT-NEXT: cmp x9, x8 +; CHECK-GI-CVT-NEXT: csel x9, x9, x8, lt +; CHECK-GI-CVT-NEXT: fcvtzs x12, s2 +; CHECK-GI-CVT-NEXT: cmp x9, x11 +; CHECK-GI-CVT-NEXT: csel x0, x9, x11, gt +; CHECK-GI-CVT-NEXT: cmp x10, x8 +; CHECK-GI-CVT-NEXT: csel x9, x10, x8, lt +; CHECK-GI-CVT-NEXT: fcvtzs x10, s0 +; CHECK-GI-CVT-NEXT: cmp x9, x11 +; CHECK-GI-CVT-NEXT: csel x1, x9, x11, gt +; CHECK-GI-CVT-NEXT: cmp x12, x8 +; CHECK-GI-CVT-NEXT: csel x9, x12, x8, lt +; CHECK-GI-CVT-NEXT: cmp x9, x11 +; CHECK-GI-CVT-NEXT: csel x2, x9, x11, gt +; CHECK-GI-CVT-NEXT: cmp x10, x8 +; CHECK-GI-CVT-NEXT: csel x8, x10, x8, lt +; CHECK-GI-CVT-NEXT: cmp x8, x11 +; CHECK-GI-CVT-NEXT: csel x3, x8, x11, gt +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v4f16_v4i50: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-FP16-NEXT: mov h1, v0.h[1] +; CHECK-GI-FP16-NEXT: fcvtzs x9, h0 +; CHECK-GI-FP16-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff +; CHECK-GI-FP16-NEXT: mov h2, v0.h[2] +; CHECK-GI-FP16-NEXT: mov x11, #-562949953421312 // =0xfffe000000000000 +; CHECK-GI-FP16-NEXT: mov h0, v0.h[3] +; CHECK-GI-FP16-NEXT: fcvtzs x10, h1 +; CHECK-GI-FP16-NEXT: cmp x9, x8 +; CHECK-GI-FP16-NEXT: csel x9, x9, x8, lt +; CHECK-GI-FP16-NEXT: fcvtzs x12, h2 +; CHECK-GI-FP16-NEXT: cmp x9, x11 +; CHECK-GI-FP16-NEXT: csel x0, x9, x11, gt +; CHECK-GI-FP16-NEXT: cmp x10, x8 +; CHECK-GI-FP16-NEXT: csel x9, x10, x8, lt +; CHECK-GI-FP16-NEXT: fcvtzs x10, h0 +; CHECK-GI-FP16-NEXT: cmp x9, x11 +; CHECK-GI-FP16-NEXT: csel x1, x9, x11, gt +; CHECK-GI-FP16-NEXT: cmp x12, x8 +; CHECK-GI-FP16-NEXT: csel x9, x12, x8, lt +; CHECK-GI-FP16-NEXT: cmp x9, x11 +; CHECK-GI-FP16-NEXT: csel x2, x9, x11, gt +; CHECK-GI-FP16-NEXT: cmp x10, x8 +; CHECK-GI-FP16-NEXT: csel x8, x10, x8, lt +; CHECK-GI-FP16-NEXT: cmp x8, x11 +; CHECK-GI-FP16-NEXT: csel x3, x8, x11, gt +; CHECK-GI-FP16-NEXT: ret %x = call <4 x i50> @llvm.fptosi.sat.v4f16.v4i50(<4 x half> %f) ret <4 x i50> %x } define <4 x i64> @test_signed_v4f16_v4i64(<4 x half> %f) { -; CHECK-CVT-LABEL: test_signed_v4f16_v4i64: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-CVT-NEXT: mov h1, v0.h[2] -; CHECK-CVT-NEXT: mov h2, v0.h[1] -; CHECK-CVT-NEXT: mov h3, v0.h[3] -; CHECK-CVT-NEXT: fcvt s0, h0 -; CHECK-CVT-NEXT: fcvt s1, h1 -; CHECK-CVT-NEXT: fcvt s2, h2 -; CHECK-CVT-NEXT: fcvt s3, h3 -; CHECK-CVT-NEXT: fcvtzs x8, s0 -; CHECK-CVT-NEXT: fcvtzs x9, s1 -; CHECK-CVT-NEXT: fcvtzs x10, s2 -; CHECK-CVT-NEXT: fcvtzs x11, s3 -; CHECK-CVT-NEXT: fmov d0, x8 -; CHECK-CVT-NEXT: fmov d1, x9 -; CHECK-CVT-NEXT: mov v0.d[1], x10 -; CHECK-CVT-NEXT: mov v1.d[1], x11 -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_v4f16_v4i64: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-FP16-NEXT: mov h1, v0.h[2] -; CHECK-FP16-NEXT: mov h2, v0.h[1] -; CHECK-FP16-NEXT: mov h3, v0.h[3] -; CHECK-FP16-NEXT: fcvtzs x8, h0 -; CHECK-FP16-NEXT: fcvtzs x9, h1 -; CHECK-FP16-NEXT: fcvtzs x10, h2 -; CHECK-FP16-NEXT: fcvtzs 
x11, h3 -; CHECK-FP16-NEXT: fmov d0, x8 -; CHECK-FP16-NEXT: fmov d1, x9 -; CHECK-FP16-NEXT: mov v0.d[1], x10 -; CHECK-FP16-NEXT: mov v1.d[1], x11 -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_v4f16_v4i64: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-CVT-NEXT: mov h1, v0.h[2] +; CHECK-SD-CVT-NEXT: mov h2, v0.h[1] +; CHECK-SD-CVT-NEXT: mov h3, v0.h[3] +; CHECK-SD-CVT-NEXT: fcvt s0, h0 +; CHECK-SD-CVT-NEXT: fcvt s1, h1 +; CHECK-SD-CVT-NEXT: fcvt s2, h2 +; CHECK-SD-CVT-NEXT: fcvt s3, h3 +; CHECK-SD-CVT-NEXT: fcvtzs x8, s0 +; CHECK-SD-CVT-NEXT: fcvtzs x9, s1 +; CHECK-SD-CVT-NEXT: fcvtzs x10, s2 +; CHECK-SD-CVT-NEXT: fcvtzs x11, s3 +; CHECK-SD-CVT-NEXT: fmov d0, x8 +; CHECK-SD-CVT-NEXT: fmov d1, x9 +; CHECK-SD-CVT-NEXT: mov v0.d[1], x10 +; CHECK-SD-CVT-NEXT: mov v1.d[1], x11 +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_v4f16_v4i64: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-FP16-NEXT: mov h1, v0.h[2] +; CHECK-SD-FP16-NEXT: mov h2, v0.h[1] +; CHECK-SD-FP16-NEXT: mov h3, v0.h[3] +; CHECK-SD-FP16-NEXT: fcvtzs x8, h0 +; CHECK-SD-FP16-NEXT: fcvtzs x9, h1 +; CHECK-SD-FP16-NEXT: fcvtzs x10, h2 +; CHECK-SD-FP16-NEXT: fcvtzs x11, h3 +; CHECK-SD-FP16-NEXT: fmov d0, x8 +; CHECK-SD-FP16-NEXT: fmov d1, x9 +; CHECK-SD-FP16-NEXT: mov v0.d[1], x10 +; CHECK-SD-FP16-NEXT: mov v1.d[1], x11 +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v4f16_v4i64: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvtl v0.4s, v0.4h +; CHECK-GI-CVT-NEXT: fcvtl v1.2d, v0.2s +; CHECK-GI-CVT-NEXT: fcvtl2 v2.2d, v0.4s +; CHECK-GI-CVT-NEXT: fcvtzs v0.2d, v1.2d +; CHECK-GI-CVT-NEXT: fcvtzs v1.2d, v2.2d +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v4f16_v4i64: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-FP16-NEXT: mov s1, v0.s[1] +; CHECK-GI-FP16-NEXT: mov h2, v0.h[1] +; CHECK-GI-FP16-NEXT: fcvt d0, h0 +; CHECK-GI-FP16-NEXT: mov h3, v1.h[1] +; CHECK-GI-FP16-NEXT: fcvt d2, h2 +; CHECK-GI-FP16-NEXT: fcvt d1, h1 +; CHECK-GI-FP16-NEXT: fcvt d3, h3 +; CHECK-GI-FP16-NEXT: mov v0.d[1], v2.d[0] +; CHECK-GI-FP16-NEXT: mov v1.d[1], v3.d[0] +; CHECK-GI-FP16-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-FP16-NEXT: fcvtzs v1.2d, v1.2d +; CHECK-GI-FP16-NEXT: ret %x = call <4 x i64> @llvm.fptosi.sat.v4f16.v4i64(<4 x half> %f) ret <4 x i64> %x } define <4 x i100> @test_signed_v4f16_v4i100(<4 x half> %f) { -; CHECK-LABEL: test_signed_v4f16_v4i100: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #112 -; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill -; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill -; CHECK-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 112 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w23, -40 -; CHECK-NEXT: .cfi_offset w24, -48 -; CHECK-NEXT: .cfi_offset w25, -56 -; CHECK-NEXT: .cfi_offset w26, -64 -; CHECK-NEXT: .cfi_offset w30, -72 -; CHECK-NEXT: .cfi_offset b8, -80 -; CHECK-NEXT: .cfi_offset b9, -88 -; CHECK-NEXT: .cfi_offset b10, -96 -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; 
CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: movi v9.2s, #241, lsl #24 -; CHECK-NEXT: mov w8, #1895825407 // =0x70ffffff -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: fmov s10, w8 -; CHECK-NEXT: mov x25, #-34359738368 // =0xfffffff800000000 -; CHECK-NEXT: mov x26, #34359738367 // =0x7ffffffff -; CHECK-NEXT: mov h0, v0.h[1] -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: csel x19, xzr, x8, vs -; CHECK-NEXT: csel x20, xzr, x9, vs -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov h0, v0.h[2] -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: csel x21, xzr, x8, vs -; CHECK-NEXT: csel x22, xzr, x9, vs -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov h0, v0.h[3] -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: csel x23, xzr, x8, vs -; CHECK-NEXT: csel x24, xzr, x9, vs -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: mov x2, x21 -; CHECK-NEXT: mov x3, x22 -; CHECK-NEXT: mov x4, x23 -; CHECK-NEXT: mov x5, x24 -; CHECK-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload -; CHECK-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: mov x0, x19 -; CHECK-NEXT: mov x1, x20 -; CHECK-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload -; CHECK-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload -; CHECK-NEXT: csel x6, xzr, x8, vs -; CHECK-NEXT: csel x7, xzr, x9, vs -; CHECK-NEXT: add sp, sp, #112 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v4f16_v4i100: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #112 +; CHECK-SD-NEXT: str d10, [sp, #16] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill +; CHECK-SD-NEXT: str x30, [sp, #40] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 112 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w22, -32 +; CHECK-SD-NEXT: .cfi_offset w23, -40 +; CHECK-SD-NEXT: .cfi_offset w24, -48 +; CHECK-SD-NEXT: 
.cfi_offset w25, -56 +; CHECK-SD-NEXT: .cfi_offset w26, -64 +; CHECK-SD-NEXT: .cfi_offset w30, -72 +; CHECK-SD-NEXT: .cfi_offset b8, -80 +; CHECK-SD-NEXT: .cfi_offset b9, -88 +; CHECK-SD-NEXT: .cfi_offset b10, -96 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: movi v9.2s, #241, lsl #24 +; CHECK-SD-NEXT: mov w8, #1895825407 // =0x70ffffff +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: fmov s10, w8 +; CHECK-SD-NEXT: mov x25, #-34359738368 // =0xfffffff800000000 +; CHECK-SD-NEXT: mov x26, #34359738367 // =0x7ffffffff +; CHECK-SD-NEXT: mov h0, v0.h[1] +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: csel x19, xzr, x8, vs +; CHECK-SD-NEXT: csel x20, xzr, x9, vs +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov h0, v0.h[2] +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: csel x21, xzr, x8, vs +; CHECK-SD-NEXT: csel x22, xzr, x9, vs +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov h0, v0.h[3] +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: csel x23, xzr, x8, vs +; CHECK-SD-NEXT: csel x24, xzr, x9, vs +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: mov x2, x21 +; CHECK-SD-NEXT: mov x3, x22 +; CHECK-SD-NEXT: mov x4, x23 +; CHECK-SD-NEXT: mov x5, x24 +; CHECK-SD-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload +; CHECK-SD-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: mov x0, x19 +; CHECK-SD-NEXT: mov x1, x20 +; CHECK-SD-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload +; CHECK-SD-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x6, xzr, x8, vs +; CHECK-SD-NEXT: csel x7, xzr, x9, vs +; CHECK-SD-NEXT: add sp, sp, #112 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v4f16_v4i100: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-CVT-NEXT: mov h1, v0.h[1] +; CHECK-GI-CVT-NEXT: mov h2, v0.h[2] +; CHECK-GI-CVT-NEXT: mov x1, xzr +; CHECK-GI-CVT-NEXT: mov h3, v0.h[3] +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: mov 
x3, xzr +; CHECK-GI-CVT-NEXT: mov x5, xzr +; CHECK-GI-CVT-NEXT: mov x7, xzr +; CHECK-GI-CVT-NEXT: fcvt s1, h1 +; CHECK-GI-CVT-NEXT: fcvt s2, h2 +; CHECK-GI-CVT-NEXT: fcvt s3, h3 +; CHECK-GI-CVT-NEXT: fcvtzs x0, s0 +; CHECK-GI-CVT-NEXT: fcvtzs x2, s1 +; CHECK-GI-CVT-NEXT: fcvtzs x4, s2 +; CHECK-GI-CVT-NEXT: fcvtzs x6, s3 +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v4f16_v4i100: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-FP16-NEXT: mov h1, v0.h[1] +; CHECK-GI-FP16-NEXT: mov h2, v0.h[2] +; CHECK-GI-FP16-NEXT: mov x1, xzr +; CHECK-GI-FP16-NEXT: mov h3, v0.h[3] +; CHECK-GI-FP16-NEXT: fcvtzs x0, h0 +; CHECK-GI-FP16-NEXT: mov x3, xzr +; CHECK-GI-FP16-NEXT: mov x5, xzr +; CHECK-GI-FP16-NEXT: mov x7, xzr +; CHECK-GI-FP16-NEXT: fcvtzs x2, h1 +; CHECK-GI-FP16-NEXT: fcvtzs x4, h2 +; CHECK-GI-FP16-NEXT: fcvtzs x6, h3 +; CHECK-GI-FP16-NEXT: ret %x = call <4 x i100> @llvm.fptosi.sat.v4f16.v4i100(<4 x half> %f) ret <4 x i100> %x } define <4 x i128> @test_signed_v4f16_v4i128(<4 x half> %f) { -; CHECK-LABEL: test_signed_v4f16_v4i128: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #112 -; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill -; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill -; CHECK-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 112 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w23, -40 -; CHECK-NEXT: .cfi_offset w24, -48 -; CHECK-NEXT: .cfi_offset w25, -56 -; CHECK-NEXT: .cfi_offset w26, -64 -; CHECK-NEXT: .cfi_offset w30, -72 -; CHECK-NEXT: .cfi_offset b8, -80 -; CHECK-NEXT: .cfi_offset b9, -88 -; CHECK-NEXT: .cfi_offset b10, -96 -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: movi v9.2s, #255, lsl #24 -; CHECK-NEXT: mov w8, #2130706431 // =0x7effffff -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: fmov s10, w8 -; CHECK-NEXT: mov x25, #-9223372036854775808 // =0x8000000000000000 -; CHECK-NEXT: mov x26, #9223372036854775807 // =0x7fffffffffffffff -; CHECK-NEXT: mov h0, v0.h[1] -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: csel x19, xzr, x8, vs -; CHECK-NEXT: csel x20, xzr, x9, vs -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov h0, v0.h[2] -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: csel x21, xzr, x8, vs -; CHECK-NEXT: csel x22, xzr, x9, vs -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov h0, v0.h[3] -; 
CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: csel x23, xzr, x8, vs -; CHECK-NEXT: csel x24, xzr, x9, vs -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: mov x2, x21 -; CHECK-NEXT: mov x3, x22 -; CHECK-NEXT: mov x4, x23 -; CHECK-NEXT: mov x5, x24 -; CHECK-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload -; CHECK-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x25, x1, lt -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: mov x0, x19 -; CHECK-NEXT: mov x1, x20 -; CHECK-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload -; CHECK-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload -; CHECK-NEXT: csel x9, x26, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload -; CHECK-NEXT: csel x6, xzr, x8, vs -; CHECK-NEXT: csel x7, xzr, x9, vs -; CHECK-NEXT: add sp, sp, #112 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v4f16_v4i128: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #112 +; CHECK-SD-NEXT: str d10, [sp, #16] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill +; CHECK-SD-NEXT: str x30, [sp, #40] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 112 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w22, -32 +; CHECK-SD-NEXT: .cfi_offset w23, -40 +; CHECK-SD-NEXT: .cfi_offset w24, -48 +; CHECK-SD-NEXT: .cfi_offset w25, -56 +; CHECK-SD-NEXT: .cfi_offset w26, -64 +; CHECK-SD-NEXT: .cfi_offset w30, -72 +; CHECK-SD-NEXT: .cfi_offset b8, -80 +; CHECK-SD-NEXT: .cfi_offset b9, -88 +; CHECK-SD-NEXT: .cfi_offset b10, -96 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: movi v9.2s, #255, lsl #24 +; CHECK-SD-NEXT: mov w8, #2130706431 // =0x7effffff +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: fmov s10, w8 +; CHECK-SD-NEXT: mov x25, #-9223372036854775808 // =0x8000000000000000 +; CHECK-SD-NEXT: mov x26, #9223372036854775807 // =0x7fffffffffffffff +; CHECK-SD-NEXT: mov h0, v0.h[1] +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: csel x19, xzr, x8, vs +; CHECK-SD-NEXT: csel x20, xzr, x9, vs +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov h0, v0.h[2] +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: 
fcmp s8, s10 +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: csel x21, xzr, x8, vs +; CHECK-SD-NEXT: csel x22, xzr, x9, vs +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov h0, v0.h[3] +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: csel x23, xzr, x8, vs +; CHECK-SD-NEXT: csel x24, xzr, x9, vs +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: mov x2, x21 +; CHECK-SD-NEXT: mov x3, x22 +; CHECK-SD-NEXT: mov x4, x23 +; CHECK-SD-NEXT: mov x5, x24 +; CHECK-SD-NEXT: ldr x30, [sp, #40] // 8-byte Folded Reload +; CHECK-SD-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x25, x1, lt +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: mov x0, x19 +; CHECK-SD-NEXT: mov x1, x20 +; CHECK-SD-NEXT: ldr d10, [sp, #16] // 8-byte Folded Reload +; CHECK-SD-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x9, x26, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp d9, d8, [sp, #24] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x6, xzr, x8, vs +; CHECK-SD-NEXT: csel x7, xzr, x9, vs +; CHECK-SD-NEXT: add sp, sp, #112 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v4f16_v4i128: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-CVT-NEXT: mov h1, v0.h[1] +; CHECK-GI-CVT-NEXT: mov h2, v0.h[2] +; CHECK-GI-CVT-NEXT: mov x1, xzr +; CHECK-GI-CVT-NEXT: mov h3, v0.h[3] +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: mov x3, xzr +; CHECK-GI-CVT-NEXT: mov x5, xzr +; CHECK-GI-CVT-NEXT: mov x7, xzr +; CHECK-GI-CVT-NEXT: fcvt s1, h1 +; CHECK-GI-CVT-NEXT: fcvt s2, h2 +; CHECK-GI-CVT-NEXT: fcvt s3, h3 +; CHECK-GI-CVT-NEXT: fcvtzs x0, s0 +; CHECK-GI-CVT-NEXT: fcvtzs x2, s1 +; CHECK-GI-CVT-NEXT: fcvtzs x4, s2 +; CHECK-GI-CVT-NEXT: fcvtzs x6, s3 +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v4f16_v4i128: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-FP16-NEXT: mov h1, v0.h[1] +; CHECK-GI-FP16-NEXT: mov h2, v0.h[2] +; CHECK-GI-FP16-NEXT: mov x1, xzr +; CHECK-GI-FP16-NEXT: mov h3, v0.h[3] +; CHECK-GI-FP16-NEXT: fcvtzs x0, h0 +; CHECK-GI-FP16-NEXT: mov x3, xzr +; CHECK-GI-FP16-NEXT: mov x5, xzr +; CHECK-GI-FP16-NEXT: mov x7, xzr +; CHECK-GI-FP16-NEXT: fcvtzs x2, h1 +; CHECK-GI-FP16-NEXT: fcvtzs x4, h2 +; CHECK-GI-FP16-NEXT: fcvtzs x6, h3 +; CHECK-GI-FP16-NEXT: ret %x = call <4 x i128> @llvm.fptosi.sat.v4f16.v4i128(<4 x half> %f) ret <4 x i128> %x } @@ -2012,104 +3612,200 @@ declare <8 x i100> @llvm.fptosi.sat.v8f16.v8i100(<8 x half>) declare <8 x i128> @llvm.fptosi.sat.v8f16.v8i128(<8 x half>) define <8 x i1> @test_signed_v8f16_v8i1(<8 x half> %f) { -; CHECK-CVT-LABEL: test_signed_v8f16_v8i1: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: movi v1.2d, 
#0000000000000000 -; CHECK-CVT-NEXT: movi v3.2d, #0xffffffffffffffff -; CHECK-CVT-NEXT: fcvtzs v2.4s, v2.4s -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: smin v2.4s, v2.4s, v1.4s -; CHECK-CVT-NEXT: smin v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: smax v1.4s, v2.4s, v3.4s -; CHECK-CVT-NEXT: smax v0.4s, v0.4s, v3.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_v8f16_v8i1: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: movi v1.2d, #0000000000000000 -; CHECK-FP16-NEXT: fcvtzs v0.8h, v0.8h -; CHECK-FP16-NEXT: movi v2.2d, #0xffffffffffffffff -; CHECK-FP16-NEXT: smin v0.8h, v0.8h, v1.8h -; CHECK-FP16-NEXT: smax v0.8h, v0.8h, v2.8h -; CHECK-FP16-NEXT: xtn v0.8b, v0.8h -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_v8f16_v8i1: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h +; CHECK-SD-CVT-NEXT: movi v1.2d, #0000000000000000 +; CHECK-SD-CVT-NEXT: movi v3.2d, #0xffffffffffffffff +; CHECK-SD-CVT-NEXT: fcvtzs v2.4s, v2.4s +; CHECK-SD-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-CVT-NEXT: smin v2.4s, v2.4s, v1.4s +; CHECK-SD-CVT-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-SD-CVT-NEXT: smax v1.4s, v2.4s, v3.4s +; CHECK-SD-CVT-NEXT: smax v0.4s, v0.4s, v3.4s +; CHECK-SD-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-SD-CVT-NEXT: xtn v0.8b, v0.8h +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_v8f16_v8i1: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: movi v1.2d, #0000000000000000 +; CHECK-SD-FP16-NEXT: fcvtzs v0.8h, v0.8h +; CHECK-SD-FP16-NEXT: movi v2.2d, #0xffffffffffffffff +; CHECK-SD-FP16-NEXT: smin v0.8h, v0.8h, v1.8h +; CHECK-SD-FP16-NEXT: smax v0.8h, v0.8h, v2.8h +; CHECK-SD-FP16-NEXT: xtn v0.8b, v0.8h +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v8f16_v8i1: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvtl v2.4s, v0.4h +; CHECK-GI-CVT-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-GI-CVT-NEXT: movi v1.2d, #0000000000000000 +; CHECK-GI-CVT-NEXT: movi v3.2d, #0xffffffffffffffff +; CHECK-GI-CVT-NEXT: fcvtzs v2.4s, v2.4s +; CHECK-GI-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-CVT-NEXT: smin v2.4s, v2.4s, v1.4s +; CHECK-GI-CVT-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-GI-CVT-NEXT: smax v1.4s, v2.4s, v3.4s +; CHECK-GI-CVT-NEXT: smax v0.4s, v0.4s, v3.4s +; CHECK-GI-CVT-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-GI-CVT-NEXT: xtn v0.8b, v0.8h +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v8f16_v8i1: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: movi v1.2d, #0000000000000000 +; CHECK-GI-FP16-NEXT: fcvtzs v0.8h, v0.8h +; CHECK-GI-FP16-NEXT: movi v2.2d, #0xffffffffffffffff +; CHECK-GI-FP16-NEXT: smin v0.8h, v0.8h, v1.8h +; CHECK-GI-FP16-NEXT: smax v0.8h, v0.8h, v2.8h +; CHECK-GI-FP16-NEXT: xtn v0.8b, v0.8h +; CHECK-GI-FP16-NEXT: ret %x = call <8 x i1> @llvm.fptosi.sat.v8f16.v8i1(<8 x half> %f) ret <8 x i1> %x } define <8 x i8> @test_signed_v8f16_v8i8(<8 x half> %f) { -; CHECK-CVT-LABEL: test_signed_v8f16_v8i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: movi v1.4s, #127 -; CHECK-CVT-NEXT: fcvtzs v2.4s, v2.4s -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: smin v2.4s, v2.4s, v1.4s -; CHECK-CVT-NEXT: smin v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: mvni v1.4s, #127 -; CHECK-CVT-NEXT: smax v2.4s, v2.4s, v1.4s -; CHECK-CVT-NEXT: smax v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn 
v0.8b, v0.8h -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_v8f16_v8i8: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs v0.8h, v0.8h -; CHECK-FP16-NEXT: sqxtn v0.8b, v0.8h -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_v8f16_v8i8: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h +; CHECK-SD-CVT-NEXT: movi v1.4s, #127 +; CHECK-SD-CVT-NEXT: fcvtzs v2.4s, v2.4s +; CHECK-SD-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-CVT-NEXT: smin v2.4s, v2.4s, v1.4s +; CHECK-SD-CVT-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-SD-CVT-NEXT: mvni v1.4s, #127 +; CHECK-SD-CVT-NEXT: smax v2.4s, v2.4s, v1.4s +; CHECK-SD-CVT-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-SD-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-SD-CVT-NEXT: xtn v0.8b, v0.8h +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_v8f16_v8i8: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs v0.8h, v0.8h +; CHECK-SD-FP16-NEXT: sqxtn v0.8b, v0.8h +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v8f16_v8i8: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvtl v2.4s, v0.4h +; CHECK-GI-CVT-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-GI-CVT-NEXT: movi v1.4s, #127 +; CHECK-GI-CVT-NEXT: fcvtzs v2.4s, v2.4s +; CHECK-GI-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-CVT-NEXT: smin v2.4s, v2.4s, v1.4s +; CHECK-GI-CVT-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-GI-CVT-NEXT: mvni v1.4s, #127 +; CHECK-GI-CVT-NEXT: smax v2.4s, v2.4s, v1.4s +; CHECK-GI-CVT-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-GI-CVT-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-GI-CVT-NEXT: xtn v0.8b, v0.8h +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v8f16_v8i8: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: movi v1.8h, #127 +; CHECK-GI-FP16-NEXT: fcvtzs v0.8h, v0.8h +; CHECK-GI-FP16-NEXT: mvni v2.8h, #127 +; CHECK-GI-FP16-NEXT: smin v0.8h, v0.8h, v1.8h +; CHECK-GI-FP16-NEXT: smax v0.8h, v0.8h, v2.8h +; CHECK-GI-FP16-NEXT: xtn v0.8b, v0.8h +; CHECK-GI-FP16-NEXT: ret %x = call <8 x i8> @llvm.fptosi.sat.v8f16.v8i8(<8 x half> %f) ret <8 x i8> %x } define <8 x i13> @test_signed_v8f16_v8i13(<8 x half> %f) { -; CHECK-CVT-LABEL: test_signed_v8f16_v8i13: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: movi v1.4s, #15, msl #8 -; CHECK-CVT-NEXT: fcvtzs v2.4s, v2.4s -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: smin v2.4s, v2.4s, v1.4s -; CHECK-CVT-NEXT: smin v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: mvni v1.4s, #15, msl #8 -; CHECK-CVT-NEXT: smax v2.4s, v2.4s, v1.4s -; CHECK-CVT-NEXT: smax v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_v8f16_v8i13: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs v0.8h, v0.8h -; CHECK-FP16-NEXT: mvni v1.8h, #240, lsl #8 -; CHECK-FP16-NEXT: movi v2.8h, #240, lsl #8 -; CHECK-FP16-NEXT: smin v0.8h, v0.8h, v1.8h -; CHECK-FP16-NEXT: smax v0.8h, v0.8h, v2.8h -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_v8f16_v8i13: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h +; CHECK-SD-CVT-NEXT: movi v1.4s, #15, msl #8 +; CHECK-SD-CVT-NEXT: fcvtzs v2.4s, v2.4s +; CHECK-SD-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-CVT-NEXT: smin v2.4s, v2.4s, v1.4s +; CHECK-SD-CVT-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-SD-CVT-NEXT: mvni v1.4s, #15, msl #8 +; CHECK-SD-CVT-NEXT: smax v2.4s, v2.4s, v1.4s +; CHECK-SD-CVT-NEXT: smax v0.4s, v0.4s, v1.4s +; 
CHECK-SD-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_v8f16_v8i13: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs v0.8h, v0.8h +; CHECK-SD-FP16-NEXT: mvni v1.8h, #240, lsl #8 +; CHECK-SD-FP16-NEXT: movi v2.8h, #240, lsl #8 +; CHECK-SD-FP16-NEXT: smin v0.8h, v0.8h, v1.8h +; CHECK-SD-FP16-NEXT: smax v0.8h, v0.8h, v2.8h +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v8f16_v8i13: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvtl v2.4s, v0.4h +; CHECK-GI-CVT-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-GI-CVT-NEXT: movi v1.4s, #15, msl #8 +; CHECK-GI-CVT-NEXT: fcvtzs v2.4s, v2.4s +; CHECK-GI-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-CVT-NEXT: smin v2.4s, v2.4s, v1.4s +; CHECK-GI-CVT-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-GI-CVT-NEXT: mvni v1.4s, #15, msl #8 +; CHECK-GI-CVT-NEXT: smax v2.4s, v2.4s, v1.4s +; CHECK-GI-CVT-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-GI-CVT-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v8f16_v8i13: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs v0.8h, v0.8h +; CHECK-GI-FP16-NEXT: mvni v1.8h, #240, lsl #8 +; CHECK-GI-FP16-NEXT: movi v2.8h, #240, lsl #8 +; CHECK-GI-FP16-NEXT: smin v0.8h, v0.8h, v1.8h +; CHECK-GI-FP16-NEXT: smax v0.8h, v0.8h, v2.8h +; CHECK-GI-FP16-NEXT: ret %x = call <8 x i13> @llvm.fptosi.sat.v8f16.v8i13(<8 x half> %f) ret <8 x i13> %x } define <8 x i16> @test_signed_v8f16_v8i16(<8 x half> %f) { -; CHECK-CVT-LABEL: test_signed_v8f16_v8i16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-CVT-NEXT: sqxtn v0.4h, v1.4s -; CHECK-CVT-NEXT: fcvtzs v1.4s, v2.4s -; CHECK-CVT-NEXT: sqxtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_v8f16_v8i16: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs v0.8h, v0.8h -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_v8f16_v8i16: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvtl v1.4s, v0.4h +; CHECK-SD-CVT-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-SD-CVT-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-SD-CVT-NEXT: sqxtn v0.4h, v1.4s +; CHECK-SD-CVT-NEXT: fcvtzs v1.4s, v2.4s +; CHECK-SD-CVT-NEXT: sqxtn2 v0.8h, v1.4s +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_v8f16_v8i16: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs v0.8h, v0.8h +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v8f16_v8i16: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvtl v2.4s, v0.4h +; CHECK-GI-CVT-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-GI-CVT-NEXT: movi v1.4s, #127, msl #8 +; CHECK-GI-CVT-NEXT: fcvtzs v2.4s, v2.4s +; CHECK-GI-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-CVT-NEXT: smin v2.4s, v2.4s, v1.4s +; CHECK-GI-CVT-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-GI-CVT-NEXT: mvni v1.4s, #127, msl #8 +; CHECK-GI-CVT-NEXT: smax v2.4s, v2.4s, v1.4s +; CHECK-GI-CVT-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-GI-CVT-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v8f16_v8i16: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzs v0.8h, v0.8h +; CHECK-GI-FP16-NEXT: ret %x = call <8 x i16> @llvm.fptosi.sat.v8f16.v8i16(<8 x half> %f) ret <8 x i16> %x } @@ -2141,394 +3837,663 @@ define <8 x i19> @test_signed_v8f16_v8i19(<8 x half> %f) { } define <8 x i32> @test_signed_v8f16_v8i32_duplicate(<8 x half> %f) { -; CHECK-LABEL: test_signed_v8f16_v8i32_duplicate: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl2 v1.4s, v0.8h -; 
CHECK-NEXT: fcvtl v0.4s, v0.4h -; CHECK-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v8f16_v8i32_duplicate: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-SD-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v8f16_v8i32_duplicate: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-GI-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-GI-NEXT: fcvtzs v0.4s, v1.4s +; CHECK-GI-NEXT: fcvtzs v1.4s, v2.4s +; CHECK-GI-NEXT: ret %x = call <8 x i32> @llvm.fptosi.sat.v8f16.v8i32(<8 x half> %f) ret <8 x i32> %x } define <8 x i50> @test_signed_v8f16_v8i50(<8 x half> %f) { -; CHECK-CVT-LABEL: test_signed_v8f16_v8i50: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: ext v1.16b, v0.16b, v0.16b, #8 -; CHECK-CVT-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff -; CHECK-CVT-NEXT: mov x9, #-562949953421312 // =0xfffe000000000000 -; CHECK-CVT-NEXT: mov h2, v1.h[1] -; CHECK-CVT-NEXT: fcvt s3, h1 -; CHECK-CVT-NEXT: mov h4, v1.h[2] -; CHECK-CVT-NEXT: mov h1, v1.h[3] -; CHECK-CVT-NEXT: fcvt s2, h2 -; CHECK-CVT-NEXT: fcvtzs x10, s3 -; CHECK-CVT-NEXT: fcvt s3, h4 -; CHECK-CVT-NEXT: fcvt s1, h1 -; CHECK-CVT-NEXT: fcvtzs x11, s2 -; CHECK-CVT-NEXT: cmp x10, x8 -; CHECK-CVT-NEXT: fcvtzs x12, s3 -; CHECK-CVT-NEXT: csel x10, x10, x8, lt -; CHECK-CVT-NEXT: mov h2, v0.h[1] -; CHECK-CVT-NEXT: fcvt s3, h0 -; CHECK-CVT-NEXT: cmp x10, x9 -; CHECK-CVT-NEXT: csel x4, x10, x9, gt -; CHECK-CVT-NEXT: cmp x11, x8 -; CHECK-CVT-NEXT: csel x10, x11, x8, lt -; CHECK-CVT-NEXT: fcvtzs x11, s1 -; CHECK-CVT-NEXT: mov h1, v0.h[2] -; CHECK-CVT-NEXT: cmp x10, x9 -; CHECK-CVT-NEXT: fcvt s2, h2 -; CHECK-CVT-NEXT: mov h0, v0.h[3] -; CHECK-CVT-NEXT: csel x5, x10, x9, gt -; CHECK-CVT-NEXT: cmp x12, x8 -; CHECK-CVT-NEXT: csel x10, x12, x8, lt -; CHECK-CVT-NEXT: fcvtzs x12, s3 -; CHECK-CVT-NEXT: cmp x10, x9 -; CHECK-CVT-NEXT: fcvt s1, h1 -; CHECK-CVT-NEXT: csel x6, x10, x9, gt -; CHECK-CVT-NEXT: cmp x11, x8 -; CHECK-CVT-NEXT: fcvt s0, h0 -; CHECK-CVT-NEXT: csel x10, x11, x8, lt -; CHECK-CVT-NEXT: fcvtzs x11, s2 -; CHECK-CVT-NEXT: cmp x10, x9 -; CHECK-CVT-NEXT: csel x7, x10, x9, gt -; CHECK-CVT-NEXT: cmp x12, x8 -; CHECK-CVT-NEXT: csel x10, x12, x8, lt -; CHECK-CVT-NEXT: fcvtzs x12, s1 -; CHECK-CVT-NEXT: cmp x10, x9 -; CHECK-CVT-NEXT: csel x0, x10, x9, gt -; CHECK-CVT-NEXT: cmp x11, x8 -; CHECK-CVT-NEXT: csel x10, x11, x8, lt -; CHECK-CVT-NEXT: fcvtzs x11, s0 -; CHECK-CVT-NEXT: cmp x10, x9 -; CHECK-CVT-NEXT: csel x1, x10, x9, gt -; CHECK-CVT-NEXT: cmp x12, x8 -; CHECK-CVT-NEXT: csel x10, x12, x8, lt -; CHECK-CVT-NEXT: cmp x10, x9 -; CHECK-CVT-NEXT: csel x2, x10, x9, gt -; CHECK-CVT-NEXT: cmp x11, x8 -; CHECK-CVT-NEXT: csel x8, x11, x8, lt -; CHECK-CVT-NEXT: cmp x8, x9 -; CHECK-CVT-NEXT: csel x3, x8, x9, gt -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_v8f16_v8i50: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: ext v1.16b, v0.16b, v0.16b, #8 -; CHECK-FP16-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff -; CHECK-FP16-NEXT: mov x9, #-562949953421312 // =0xfffe000000000000 -; CHECK-FP16-NEXT: mov h2, v1.h[1] -; CHECK-FP16-NEXT: fcvtzs x10, h1 -; CHECK-FP16-NEXT: mov h3, v1.h[2] -; CHECK-FP16-NEXT: mov h1, v1.h[3] -; CHECK-FP16-NEXT: fcvtzs x11, h2 -; CHECK-FP16-NEXT: cmp x10, x8 -; CHECK-FP16-NEXT: fcvtzs x12, h3 -; CHECK-FP16-NEXT: csel x10, x10, x8, lt -; CHECK-FP16-NEXT: mov h2, v0.h[2] -; CHECK-FP16-NEXT: cmp x10, x9 -; CHECK-FP16-NEXT: csel x4, 
x10, x9, gt -; CHECK-FP16-NEXT: cmp x11, x8 -; CHECK-FP16-NEXT: csel x10, x11, x8, lt -; CHECK-FP16-NEXT: fcvtzs x11, h1 -; CHECK-FP16-NEXT: mov h1, v0.h[1] -; CHECK-FP16-NEXT: cmp x10, x9 -; CHECK-FP16-NEXT: csel x5, x10, x9, gt -; CHECK-FP16-NEXT: cmp x12, x8 -; CHECK-FP16-NEXT: csel x10, x12, x8, lt -; CHECK-FP16-NEXT: fcvtzs x12, h0 -; CHECK-FP16-NEXT: mov h0, v0.h[3] -; CHECK-FP16-NEXT: cmp x10, x9 -; CHECK-FP16-NEXT: csel x6, x10, x9, gt -; CHECK-FP16-NEXT: cmp x11, x8 -; CHECK-FP16-NEXT: csel x10, x11, x8, lt -; CHECK-FP16-NEXT: fcvtzs x11, h1 -; CHECK-FP16-NEXT: cmp x10, x9 -; CHECK-FP16-NEXT: csel x7, x10, x9, gt -; CHECK-FP16-NEXT: cmp x12, x8 -; CHECK-FP16-NEXT: csel x10, x12, x8, lt -; CHECK-FP16-NEXT: fcvtzs x12, h2 -; CHECK-FP16-NEXT: cmp x10, x9 -; CHECK-FP16-NEXT: csel x0, x10, x9, gt -; CHECK-FP16-NEXT: cmp x11, x8 -; CHECK-FP16-NEXT: csel x10, x11, x8, lt -; CHECK-FP16-NEXT: fcvtzs x11, h0 -; CHECK-FP16-NEXT: cmp x10, x9 -; CHECK-FP16-NEXT: csel x1, x10, x9, gt -; CHECK-FP16-NEXT: cmp x12, x8 -; CHECK-FP16-NEXT: csel x10, x12, x8, lt -; CHECK-FP16-NEXT: cmp x10, x9 -; CHECK-FP16-NEXT: csel x2, x10, x9, gt -; CHECK-FP16-NEXT: cmp x11, x8 -; CHECK-FP16-NEXT: csel x8, x11, x8, lt -; CHECK-FP16-NEXT: cmp x8, x9 -; CHECK-FP16-NEXT: csel x3, x8, x9, gt -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_v8f16_v8i50: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-SD-CVT-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff +; CHECK-SD-CVT-NEXT: mov x9, #-562949953421312 // =0xfffe000000000000 +; CHECK-SD-CVT-NEXT: mov h2, v1.h[1] +; CHECK-SD-CVT-NEXT: fcvt s3, h1 +; CHECK-SD-CVT-NEXT: mov h4, v1.h[2] +; CHECK-SD-CVT-NEXT: mov h1, v1.h[3] +; CHECK-SD-CVT-NEXT: fcvt s2, h2 +; CHECK-SD-CVT-NEXT: fcvtzs x10, s3 +; CHECK-SD-CVT-NEXT: fcvt s3, h4 +; CHECK-SD-CVT-NEXT: fcvt s1, h1 +; CHECK-SD-CVT-NEXT: fcvtzs x11, s2 +; CHECK-SD-CVT-NEXT: cmp x10, x8 +; CHECK-SD-CVT-NEXT: fcvtzs x12, s3 +; CHECK-SD-CVT-NEXT: csel x10, x10, x8, lt +; CHECK-SD-CVT-NEXT: mov h2, v0.h[1] +; CHECK-SD-CVT-NEXT: fcvt s3, h0 +; CHECK-SD-CVT-NEXT: cmp x10, x9 +; CHECK-SD-CVT-NEXT: csel x4, x10, x9, gt +; CHECK-SD-CVT-NEXT: cmp x11, x8 +; CHECK-SD-CVT-NEXT: csel x10, x11, x8, lt +; CHECK-SD-CVT-NEXT: fcvtzs x11, s1 +; CHECK-SD-CVT-NEXT: mov h1, v0.h[2] +; CHECK-SD-CVT-NEXT: cmp x10, x9 +; CHECK-SD-CVT-NEXT: fcvt s2, h2 +; CHECK-SD-CVT-NEXT: mov h0, v0.h[3] +; CHECK-SD-CVT-NEXT: csel x5, x10, x9, gt +; CHECK-SD-CVT-NEXT: cmp x12, x8 +; CHECK-SD-CVT-NEXT: csel x10, x12, x8, lt +; CHECK-SD-CVT-NEXT: fcvtzs x12, s3 +; CHECK-SD-CVT-NEXT: cmp x10, x9 +; CHECK-SD-CVT-NEXT: fcvt s1, h1 +; CHECK-SD-CVT-NEXT: csel x6, x10, x9, gt +; CHECK-SD-CVT-NEXT: cmp x11, x8 +; CHECK-SD-CVT-NEXT: fcvt s0, h0 +; CHECK-SD-CVT-NEXT: csel x10, x11, x8, lt +; CHECK-SD-CVT-NEXT: fcvtzs x11, s2 +; CHECK-SD-CVT-NEXT: cmp x10, x9 +; CHECK-SD-CVT-NEXT: csel x7, x10, x9, gt +; CHECK-SD-CVT-NEXT: cmp x12, x8 +; CHECK-SD-CVT-NEXT: csel x10, x12, x8, lt +; CHECK-SD-CVT-NEXT: fcvtzs x12, s1 +; CHECK-SD-CVT-NEXT: cmp x10, x9 +; CHECK-SD-CVT-NEXT: csel x0, x10, x9, gt +; CHECK-SD-CVT-NEXT: cmp x11, x8 +; CHECK-SD-CVT-NEXT: csel x10, x11, x8, lt +; CHECK-SD-CVT-NEXT: fcvtzs x11, s0 +; CHECK-SD-CVT-NEXT: cmp x10, x9 +; CHECK-SD-CVT-NEXT: csel x1, x10, x9, gt +; CHECK-SD-CVT-NEXT: cmp x12, x8 +; CHECK-SD-CVT-NEXT: csel x10, x12, x8, lt +; CHECK-SD-CVT-NEXT: cmp x10, x9 +; CHECK-SD-CVT-NEXT: csel x2, x10, x9, gt +; CHECK-SD-CVT-NEXT: cmp x11, x8 +; CHECK-SD-CVT-NEXT: csel x8, x11, x8, lt +; CHECK-SD-CVT-NEXT: 
cmp x8, x9 +; CHECK-SD-CVT-NEXT: csel x3, x8, x9, gt +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_v8f16_v8i50: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-SD-FP16-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff +; CHECK-SD-FP16-NEXT: mov x9, #-562949953421312 // =0xfffe000000000000 +; CHECK-SD-FP16-NEXT: mov h2, v1.h[1] +; CHECK-SD-FP16-NEXT: fcvtzs x10, h1 +; CHECK-SD-FP16-NEXT: mov h3, v1.h[2] +; CHECK-SD-FP16-NEXT: mov h1, v1.h[3] +; CHECK-SD-FP16-NEXT: fcvtzs x11, h2 +; CHECK-SD-FP16-NEXT: cmp x10, x8 +; CHECK-SD-FP16-NEXT: fcvtzs x12, h3 +; CHECK-SD-FP16-NEXT: csel x10, x10, x8, lt +; CHECK-SD-FP16-NEXT: mov h2, v0.h[2] +; CHECK-SD-FP16-NEXT: cmp x10, x9 +; CHECK-SD-FP16-NEXT: csel x4, x10, x9, gt +; CHECK-SD-FP16-NEXT: cmp x11, x8 +; CHECK-SD-FP16-NEXT: csel x10, x11, x8, lt +; CHECK-SD-FP16-NEXT: fcvtzs x11, h1 +; CHECK-SD-FP16-NEXT: mov h1, v0.h[1] +; CHECK-SD-FP16-NEXT: cmp x10, x9 +; CHECK-SD-FP16-NEXT: csel x5, x10, x9, gt +; CHECK-SD-FP16-NEXT: cmp x12, x8 +; CHECK-SD-FP16-NEXT: csel x10, x12, x8, lt +; CHECK-SD-FP16-NEXT: fcvtzs x12, h0 +; CHECK-SD-FP16-NEXT: mov h0, v0.h[3] +; CHECK-SD-FP16-NEXT: cmp x10, x9 +; CHECK-SD-FP16-NEXT: csel x6, x10, x9, gt +; CHECK-SD-FP16-NEXT: cmp x11, x8 +; CHECK-SD-FP16-NEXT: csel x10, x11, x8, lt +; CHECK-SD-FP16-NEXT: fcvtzs x11, h1 +; CHECK-SD-FP16-NEXT: cmp x10, x9 +; CHECK-SD-FP16-NEXT: csel x7, x10, x9, gt +; CHECK-SD-FP16-NEXT: cmp x12, x8 +; CHECK-SD-FP16-NEXT: csel x10, x12, x8, lt +; CHECK-SD-FP16-NEXT: fcvtzs x12, h2 +; CHECK-SD-FP16-NEXT: cmp x10, x9 +; CHECK-SD-FP16-NEXT: csel x0, x10, x9, gt +; CHECK-SD-FP16-NEXT: cmp x11, x8 +; CHECK-SD-FP16-NEXT: csel x10, x11, x8, lt +; CHECK-SD-FP16-NEXT: fcvtzs x11, h0 +; CHECK-SD-FP16-NEXT: cmp x10, x9 +; CHECK-SD-FP16-NEXT: csel x1, x10, x9, gt +; CHECK-SD-FP16-NEXT: cmp x12, x8 +; CHECK-SD-FP16-NEXT: csel x10, x12, x8, lt +; CHECK-SD-FP16-NEXT: cmp x10, x9 +; CHECK-SD-FP16-NEXT: csel x2, x10, x9, gt +; CHECK-SD-FP16-NEXT: cmp x11, x8 +; CHECK-SD-FP16-NEXT: csel x8, x11, x8, lt +; CHECK-SD-FP16-NEXT: cmp x8, x9 +; CHECK-SD-FP16-NEXT: csel x3, x8, x9, gt +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v8f16_v8i50: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-GI-CVT-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff +; CHECK-GI-CVT-NEXT: mov x9, #-562949953421312 // =0xfffe000000000000 +; CHECK-GI-CVT-NEXT: mov h2, v1.h[1] +; CHECK-GI-CVT-NEXT: fcvt s3, h1 +; CHECK-GI-CVT-NEXT: mov h4, v1.h[2] +; CHECK-GI-CVT-NEXT: mov h1, v1.h[3] +; CHECK-GI-CVT-NEXT: fcvt s2, h2 +; CHECK-GI-CVT-NEXT: fcvtzs x10, s3 +; CHECK-GI-CVT-NEXT: fcvt s3, h4 +; CHECK-GI-CVT-NEXT: fcvt s1, h1 +; CHECK-GI-CVT-NEXT: fcvtzs x11, s2 +; CHECK-GI-CVT-NEXT: cmp x10, x8 +; CHECK-GI-CVT-NEXT: fcvtzs x12, s3 +; CHECK-GI-CVT-NEXT: csel x10, x10, x8, lt +; CHECK-GI-CVT-NEXT: mov h2, v0.h[1] +; CHECK-GI-CVT-NEXT: fcvt s3, h0 +; CHECK-GI-CVT-NEXT: cmp x10, x9 +; CHECK-GI-CVT-NEXT: csel x4, x10, x9, gt +; CHECK-GI-CVT-NEXT: cmp x11, x8 +; CHECK-GI-CVT-NEXT: csel x10, x11, x8, lt +; CHECK-GI-CVT-NEXT: fcvtzs x11, s1 +; CHECK-GI-CVT-NEXT: mov h1, v0.h[2] +; CHECK-GI-CVT-NEXT: cmp x10, x9 +; CHECK-GI-CVT-NEXT: fcvt s2, h2 +; CHECK-GI-CVT-NEXT: mov h0, v0.h[3] +; CHECK-GI-CVT-NEXT: csel x5, x10, x9, gt +; CHECK-GI-CVT-NEXT: cmp x12, x8 +; CHECK-GI-CVT-NEXT: csel x10, x12, x8, lt +; CHECK-GI-CVT-NEXT: fcvtzs x12, s3 +; CHECK-GI-CVT-NEXT: cmp x10, x9 +; CHECK-GI-CVT-NEXT: fcvt s1, h1 +; CHECK-GI-CVT-NEXT: 
csel x6, x10, x9, gt +; CHECK-GI-CVT-NEXT: cmp x11, x8 +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: csel x10, x11, x8, lt +; CHECK-GI-CVT-NEXT: fcvtzs x11, s2 +; CHECK-GI-CVT-NEXT: cmp x10, x9 +; CHECK-GI-CVT-NEXT: csel x7, x10, x9, gt +; CHECK-GI-CVT-NEXT: cmp x12, x8 +; CHECK-GI-CVT-NEXT: csel x10, x12, x8, lt +; CHECK-GI-CVT-NEXT: fcvtzs x12, s1 +; CHECK-GI-CVT-NEXT: cmp x10, x9 +; CHECK-GI-CVT-NEXT: csel x0, x10, x9, gt +; CHECK-GI-CVT-NEXT: cmp x11, x8 +; CHECK-GI-CVT-NEXT: csel x10, x11, x8, lt +; CHECK-GI-CVT-NEXT: fcvtzs x11, s0 +; CHECK-GI-CVT-NEXT: cmp x10, x9 +; CHECK-GI-CVT-NEXT: csel x1, x10, x9, gt +; CHECK-GI-CVT-NEXT: cmp x12, x8 +; CHECK-GI-CVT-NEXT: csel x10, x12, x8, lt +; CHECK-GI-CVT-NEXT: cmp x10, x9 +; CHECK-GI-CVT-NEXT: csel x2, x10, x9, gt +; CHECK-GI-CVT-NEXT: cmp x11, x8 +; CHECK-GI-CVT-NEXT: csel x8, x11, x8, lt +; CHECK-GI-CVT-NEXT: cmp x8, x9 +; CHECK-GI-CVT-NEXT: csel x3, x8, x9, gt +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v8f16_v8i50: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-GI-FP16-NEXT: mov x8, #562949953421311 // =0x1ffffffffffff +; CHECK-GI-FP16-NEXT: mov x9, #-562949953421312 // =0xfffe000000000000 +; CHECK-GI-FP16-NEXT: mov h2, v1.h[1] +; CHECK-GI-FP16-NEXT: fcvtzs x10, h1 +; CHECK-GI-FP16-NEXT: mov h3, v1.h[2] +; CHECK-GI-FP16-NEXT: mov h1, v1.h[3] +; CHECK-GI-FP16-NEXT: fcvtzs x11, h2 +; CHECK-GI-FP16-NEXT: cmp x10, x8 +; CHECK-GI-FP16-NEXT: fcvtzs x12, h3 +; CHECK-GI-FP16-NEXT: csel x10, x10, x8, lt +; CHECK-GI-FP16-NEXT: mov h2, v0.h[2] +; CHECK-GI-FP16-NEXT: cmp x10, x9 +; CHECK-GI-FP16-NEXT: csel x4, x10, x9, gt +; CHECK-GI-FP16-NEXT: cmp x11, x8 +; CHECK-GI-FP16-NEXT: csel x10, x11, x8, lt +; CHECK-GI-FP16-NEXT: fcvtzs x11, h1 +; CHECK-GI-FP16-NEXT: mov h1, v0.h[1] +; CHECK-GI-FP16-NEXT: cmp x10, x9 +; CHECK-GI-FP16-NEXT: csel x5, x10, x9, gt +; CHECK-GI-FP16-NEXT: cmp x12, x8 +; CHECK-GI-FP16-NEXT: csel x10, x12, x8, lt +; CHECK-GI-FP16-NEXT: fcvtzs x12, h0 +; CHECK-GI-FP16-NEXT: mov h0, v0.h[3] +; CHECK-GI-FP16-NEXT: cmp x10, x9 +; CHECK-GI-FP16-NEXT: csel x6, x10, x9, gt +; CHECK-GI-FP16-NEXT: cmp x11, x8 +; CHECK-GI-FP16-NEXT: csel x10, x11, x8, lt +; CHECK-GI-FP16-NEXT: fcvtzs x11, h1 +; CHECK-GI-FP16-NEXT: cmp x10, x9 +; CHECK-GI-FP16-NEXT: csel x7, x10, x9, gt +; CHECK-GI-FP16-NEXT: cmp x12, x8 +; CHECK-GI-FP16-NEXT: csel x10, x12, x8, lt +; CHECK-GI-FP16-NEXT: fcvtzs x12, h2 +; CHECK-GI-FP16-NEXT: cmp x10, x9 +; CHECK-GI-FP16-NEXT: csel x0, x10, x9, gt +; CHECK-GI-FP16-NEXT: cmp x11, x8 +; CHECK-GI-FP16-NEXT: csel x10, x11, x8, lt +; CHECK-GI-FP16-NEXT: fcvtzs x11, h0 +; CHECK-GI-FP16-NEXT: cmp x10, x9 +; CHECK-GI-FP16-NEXT: csel x1, x10, x9, gt +; CHECK-GI-FP16-NEXT: cmp x12, x8 +; CHECK-GI-FP16-NEXT: csel x10, x12, x8, lt +; CHECK-GI-FP16-NEXT: cmp x10, x9 +; CHECK-GI-FP16-NEXT: csel x2, x10, x9, gt +; CHECK-GI-FP16-NEXT: cmp x11, x8 +; CHECK-GI-FP16-NEXT: csel x8, x11, x8, lt +; CHECK-GI-FP16-NEXT: cmp x8, x9 +; CHECK-GI-FP16-NEXT: csel x3, x8, x9, gt +; CHECK-GI-FP16-NEXT: ret %x = call <8 x i50> @llvm.fptosi.sat.v8f16.v8i50(<8 x half> %f) ret <8 x i50> %x } define <8 x i64> @test_signed_v8f16_v8i64(<8 x half> %f) { -; CHECK-CVT-LABEL: test_signed_v8f16_v8i64: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: ext v1.16b, v0.16b, v0.16b, #8 -; CHECK-CVT-NEXT: mov h4, v0.h[2] -; CHECK-CVT-NEXT: mov h3, v0.h[1] -; CHECK-CVT-NEXT: mov h7, v0.h[3] -; CHECK-CVT-NEXT: fcvt s0, h0 -; CHECK-CVT-NEXT: mov h2, v1.h[2] -; CHECK-CVT-NEXT: mov h5, v1.h[1] -; 
CHECK-CVT-NEXT: mov h6, v1.h[3] -; CHECK-CVT-NEXT: fcvt s1, h1 -; CHECK-CVT-NEXT: fcvt s4, h4 -; CHECK-CVT-NEXT: fcvt s3, h3 -; CHECK-CVT-NEXT: fcvt s7, h7 -; CHECK-CVT-NEXT: fcvtzs x9, s0 -; CHECK-CVT-NEXT: fcvt s2, h2 -; CHECK-CVT-NEXT: fcvt s5, h5 -; CHECK-CVT-NEXT: fcvt s6, h6 -; CHECK-CVT-NEXT: fcvtzs x8, s1 -; CHECK-CVT-NEXT: fcvtzs x12, s4 -; CHECK-CVT-NEXT: fcvtzs x11, s3 -; CHECK-CVT-NEXT: fcvtzs x15, s7 -; CHECK-CVT-NEXT: fmov d0, x9 -; CHECK-CVT-NEXT: fcvtzs x10, s2 -; CHECK-CVT-NEXT: fcvtzs x13, s5 -; CHECK-CVT-NEXT: fcvtzs x14, s6 -; CHECK-CVT-NEXT: fmov d2, x8 -; CHECK-CVT-NEXT: fmov d1, x12 -; CHECK-CVT-NEXT: mov v0.d[1], x11 -; CHECK-CVT-NEXT: fmov d3, x10 -; CHECK-CVT-NEXT: mov v2.d[1], x13 -; CHECK-CVT-NEXT: mov v1.d[1], x15 -; CHECK-CVT-NEXT: mov v3.d[1], x14 -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_v8f16_v8i64: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: ext v1.16b, v0.16b, v0.16b, #8 -; CHECK-FP16-NEXT: mov h4, v0.h[2] -; CHECK-FP16-NEXT: mov h3, v0.h[1] -; CHECK-FP16-NEXT: mov h7, v0.h[3] -; CHECK-FP16-NEXT: fcvtzs x9, h0 -; CHECK-FP16-NEXT: mov h2, v1.h[2] -; CHECK-FP16-NEXT: mov h5, v1.h[1] -; CHECK-FP16-NEXT: mov h6, v1.h[3] -; CHECK-FP16-NEXT: fcvtzs x8, h1 -; CHECK-FP16-NEXT: fcvtzs x12, h4 -; CHECK-FP16-NEXT: fcvtzs x11, h3 -; CHECK-FP16-NEXT: fcvtzs x15, h7 -; CHECK-FP16-NEXT: fmov d0, x9 -; CHECK-FP16-NEXT: fcvtzs x10, h2 -; CHECK-FP16-NEXT: fcvtzs x13, h5 -; CHECK-FP16-NEXT: fcvtzs x14, h6 -; CHECK-FP16-NEXT: fmov d2, x8 -; CHECK-FP16-NEXT: fmov d1, x12 -; CHECK-FP16-NEXT: mov v0.d[1], x11 -; CHECK-FP16-NEXT: fmov d3, x10 -; CHECK-FP16-NEXT: mov v2.d[1], x13 -; CHECK-FP16-NEXT: mov v1.d[1], x15 -; CHECK-FP16-NEXT: mov v3.d[1], x14 -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_v8f16_v8i64: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-SD-CVT-NEXT: mov h4, v0.h[2] +; CHECK-SD-CVT-NEXT: mov h3, v0.h[1] +; CHECK-SD-CVT-NEXT: mov h7, v0.h[3] +; CHECK-SD-CVT-NEXT: fcvt s0, h0 +; CHECK-SD-CVT-NEXT: mov h2, v1.h[2] +; CHECK-SD-CVT-NEXT: mov h5, v1.h[1] +; CHECK-SD-CVT-NEXT: mov h6, v1.h[3] +; CHECK-SD-CVT-NEXT: fcvt s1, h1 +; CHECK-SD-CVT-NEXT: fcvt s4, h4 +; CHECK-SD-CVT-NEXT: fcvt s3, h3 +; CHECK-SD-CVT-NEXT: fcvt s7, h7 +; CHECK-SD-CVT-NEXT: fcvtzs x9, s0 +; CHECK-SD-CVT-NEXT: fcvt s2, h2 +; CHECK-SD-CVT-NEXT: fcvt s5, h5 +; CHECK-SD-CVT-NEXT: fcvt s6, h6 +; CHECK-SD-CVT-NEXT: fcvtzs x8, s1 +; CHECK-SD-CVT-NEXT: fcvtzs x12, s4 +; CHECK-SD-CVT-NEXT: fcvtzs x11, s3 +; CHECK-SD-CVT-NEXT: fcvtzs x15, s7 +; CHECK-SD-CVT-NEXT: fmov d0, x9 +; CHECK-SD-CVT-NEXT: fcvtzs x10, s2 +; CHECK-SD-CVT-NEXT: fcvtzs x13, s5 +; CHECK-SD-CVT-NEXT: fcvtzs x14, s6 +; CHECK-SD-CVT-NEXT: fmov d2, x8 +; CHECK-SD-CVT-NEXT: fmov d1, x12 +; CHECK-SD-CVT-NEXT: mov v0.d[1], x11 +; CHECK-SD-CVT-NEXT: fmov d3, x10 +; CHECK-SD-CVT-NEXT: mov v2.d[1], x13 +; CHECK-SD-CVT-NEXT: mov v1.d[1], x15 +; CHECK-SD-CVT-NEXT: mov v3.d[1], x14 +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_v8f16_v8i64: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-SD-FP16-NEXT: mov h4, v0.h[2] +; CHECK-SD-FP16-NEXT: mov h3, v0.h[1] +; CHECK-SD-FP16-NEXT: mov h7, v0.h[3] +; CHECK-SD-FP16-NEXT: fcvtzs x9, h0 +; CHECK-SD-FP16-NEXT: mov h2, v1.h[2] +; CHECK-SD-FP16-NEXT: mov h5, v1.h[1] +; CHECK-SD-FP16-NEXT: mov h6, v1.h[3] +; CHECK-SD-FP16-NEXT: fcvtzs x8, h1 +; CHECK-SD-FP16-NEXT: fcvtzs x12, h4 +; CHECK-SD-FP16-NEXT: fcvtzs x11, h3 +; CHECK-SD-FP16-NEXT: fcvtzs x15, h7 +; CHECK-SD-FP16-NEXT: 
fmov d0, x9 +; CHECK-SD-FP16-NEXT: fcvtzs x10, h2 +; CHECK-SD-FP16-NEXT: fcvtzs x13, h5 +; CHECK-SD-FP16-NEXT: fcvtzs x14, h6 +; CHECK-SD-FP16-NEXT: fmov d2, x8 +; CHECK-SD-FP16-NEXT: fmov d1, x12 +; CHECK-SD-FP16-NEXT: mov v0.d[1], x11 +; CHECK-SD-FP16-NEXT: fmov d3, x10 +; CHECK-SD-FP16-NEXT: mov v2.d[1], x13 +; CHECK-SD-FP16-NEXT: mov v1.d[1], x15 +; CHECK-SD-FP16-NEXT: mov v3.d[1], x14 +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v8f16_v8i64: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvtl v1.4s, v0.4h +; CHECK-GI-CVT-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-GI-CVT-NEXT: fcvtl v2.2d, v1.2s +; CHECK-GI-CVT-NEXT: fcvtl2 v1.2d, v1.4s +; CHECK-GI-CVT-NEXT: fcvtl v3.2d, v0.2s +; CHECK-GI-CVT-NEXT: fcvtl2 v4.2d, v0.4s +; CHECK-GI-CVT-NEXT: fcvtzs v0.2d, v2.2d +; CHECK-GI-CVT-NEXT: fcvtzs v1.2d, v1.2d +; CHECK-GI-CVT-NEXT: fcvtzs v2.2d, v3.2d +; CHECK-GI-CVT-NEXT: fcvtzs v3.2d, v4.2d +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v8f16_v8i64: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: mov s1, v0.s[1] +; CHECK-GI-FP16-NEXT: mov s2, v0.s[2] +; CHECK-GI-FP16-NEXT: mov s3, v0.s[3] +; CHECK-GI-FP16-NEXT: mov h4, v0.h[1] +; CHECK-GI-FP16-NEXT: fcvt d0, h0 +; CHECK-GI-FP16-NEXT: mov h5, v1.h[1] +; CHECK-GI-FP16-NEXT: mov h6, v2.h[1] +; CHECK-GI-FP16-NEXT: mov h7, v3.h[1] +; CHECK-GI-FP16-NEXT: fcvt d4, h4 +; CHECK-GI-FP16-NEXT: fcvt d1, h1 +; CHECK-GI-FP16-NEXT: fcvt d2, h2 +; CHECK-GI-FP16-NEXT: fcvt d3, h3 +; CHECK-GI-FP16-NEXT: fcvt d5, h5 +; CHECK-GI-FP16-NEXT: fcvt d6, h6 +; CHECK-GI-FP16-NEXT: fcvt d7, h7 +; CHECK-GI-FP16-NEXT: mov v0.d[1], v4.d[0] +; CHECK-GI-FP16-NEXT: mov v1.d[1], v5.d[0] +; CHECK-GI-FP16-NEXT: mov v2.d[1], v6.d[0] +; CHECK-GI-FP16-NEXT: mov v3.d[1], v7.d[0] +; CHECK-GI-FP16-NEXT: fcvtzs v0.2d, v0.2d +; CHECK-GI-FP16-NEXT: fcvtzs v1.2d, v1.2d +; CHECK-GI-FP16-NEXT: fcvtzs v2.2d, v2.2d +; CHECK-GI-FP16-NEXT: fcvtzs v3.2d, v3.2d +; CHECK-GI-FP16-NEXT: ret %x = call <8 x i64> @llvm.fptosi.sat.v8f16.v8i64(<8 x half> %f) ret <8 x i64> %x } define <8 x i100> @test_signed_v8f16_v8i100(<8 x half> %f) { -; CHECK-LABEL: test_signed_v8f16_v8i100: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #192 -; CHECK-NEXT: str d10, [sp, #64] // 8-byte Folded Spill -; CHECK-NEXT: stp d9, d8, [sp, #80] // 16-byte Folded Spill -; CHECK-NEXT: stp x29, x30, [sp, #96] // 16-byte Folded Spill -; CHECK-NEXT: stp x28, x27, [sp, #112] // 16-byte Folded Spill -; CHECK-NEXT: stp x26, x25, [sp, #128] // 16-byte Folded Spill -; CHECK-NEXT: stp x24, x23, [sp, #144] // 16-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #160] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #176] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 192 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w23, -40 -; CHECK-NEXT: .cfi_offset w24, -48 -; CHECK-NEXT: .cfi_offset w25, -56 -; CHECK-NEXT: .cfi_offset w26, -64 -; CHECK-NEXT: .cfi_offset w27, -72 -; CHECK-NEXT: .cfi_offset w28, -80 -; CHECK-NEXT: .cfi_offset w30, -88 -; CHECK-NEXT: .cfi_offset w29, -96 -; CHECK-NEXT: .cfi_offset b8, -104 -; CHECK-NEXT: .cfi_offset b9, -112 -; CHECK-NEXT: .cfi_offset b10, -128 -; CHECK-NEXT: str q0, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: mov x19, x8 -; CHECK-NEXT: str q0, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: mov h0, v0.h[1] -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: 
bl __fixsfti -; CHECK-NEXT: movi v10.2s, #241, lsl #24 -; CHECK-NEXT: mov w8, #1895825407 // =0x70ffffff -; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: fmov s9, w8 -; CHECK-NEXT: mov x22, #-34359738368 // =0xfffffff800000000 -; CHECK-NEXT: mov x23, #34359738367 // =0x7ffffffff -; CHECK-NEXT: mov h0, v0.h[3] -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: csel x8, x22, x1, lt -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: csinv x9, x9, xzr, le -; CHECK-NEXT: csel x8, x23, x8, gt -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: csel x8, xzr, x8, vs -; CHECK-NEXT: str x8, [sp, #72] // 8-byte Folded Spill -; CHECK-NEXT: csel x8, xzr, x9, vs -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: str x8, [sp, #24] // 8-byte Folded Spill -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x22, x1, lt -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: csel x9, x23, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: csel x10, xzr, x8, vs -; CHECK-NEXT: csel x8, xzr, x9, vs -; CHECK-NEXT: stp x8, x10, [sp, #8] // 16-byte Folded Spill -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: mov h0, v0.h[2] -; CHECK-NEXT: csel x8, x22, x1, lt -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: csinv x9, x9, xzr, le -; CHECK-NEXT: csel x8, x23, x8, gt -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: csel x26, xzr, x8, vs -; CHECK-NEXT: csel x8, xzr, x9, vs -; CHECK-NEXT: str x8, [sp, #32] // 8-byte Folded Spill -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: mov h0, v0.h[1] -; CHECK-NEXT: csel x8, x22, x1, lt -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: csinv x9, x9, xzr, le -; CHECK-NEXT: csel x8, x23, x8, gt -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: csel x27, xzr, x8, vs -; CHECK-NEXT: csel x8, xzr, x9, vs -; CHECK-NEXT: str x8, [sp] // 8-byte Folded Spill -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: mov h0, v0.h[3] -; CHECK-NEXT: csel x8, x22, x1, lt -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: csinv x9, x9, xzr, le -; CHECK-NEXT: csel x8, x23, x8, gt -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: csel x20, xzr, x8, vs -; CHECK-NEXT: csel x21, xzr, x9, vs -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, x22, x1, lt -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: csel x9, x23, x9, gt -; CHECK-NEXT: csinv x8, x8, xzr, le -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: csel x28, xzr, x8, vs -; CHECK-NEXT: csel x24, xzr, x9, vs -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: mov h0, v0.h[2] -; CHECK-NEXT: csel x8, x22, x1, lt -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: csinv x9, x9, xzr, le -; 
CHECK-NEXT: csel x8, x23, x8, gt -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: fcvt s8, h0 -; CHECK-NEXT: csel x25, xzr, x8, vs -; CHECK-NEXT: csel x29, xzr, x9, vs -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: ldr x9, [sp] // 8-byte Folded Reload -; CHECK-NEXT: extr x8, x24, x28, #28 -; CHECK-NEXT: fcmp s8, s10 -; CHECK-NEXT: bfi x25, x21, #36, #28 -; CHECK-NEXT: lsr x11, x20, #28 -; CHECK-NEXT: stur x9, [x19, #75] -; CHECK-NEXT: extr x9, x20, x21, #28 -; CHECK-NEXT: stur x8, [x19, #41] -; CHECK-NEXT: csel x8, x22, x1, lt -; CHECK-NEXT: str x9, [x19, #16] -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: ldr x10, [sp, #32] // 8-byte Folded Reload -; CHECK-NEXT: stp x29, x25, [x19] -; CHECK-NEXT: stur x10, [x19, #50] -; CHECK-NEXT: lsr x10, x24, #28 -; CHECK-NEXT: csinv x9, x9, xzr, le -; CHECK-NEXT: csel x8, x23, x8, gt -; CHECK-NEXT: fcmp s8, s8 -; CHECK-NEXT: strb w10, [x19, #49] -; CHECK-NEXT: ldp x14, x12, [sp, #8] // 16-byte Folded Reload -; CHECK-NEXT: strb w11, [x19, #24] -; CHECK-NEXT: csel x8, xzr, x8, vs -; CHECK-NEXT: ldr x13, [sp, #24] // 8-byte Folded Reload -; CHECK-NEXT: csel x9, xzr, x9, vs -; CHECK-NEXT: bfi x8, x28, #36, #28 -; CHECK-NEXT: extr x10, x14, x12, #28 -; CHECK-NEXT: bfi x27, x12, #36, #28 -; CHECK-NEXT: ldr x12, [sp, #72] // 8-byte Folded Reload -; CHECK-NEXT: bfi x26, x13, #36, #28 -; CHECK-NEXT: stur x9, [x19, #25] -; CHECK-NEXT: lsr x9, x14, #28 -; CHECK-NEXT: extr x11, x12, x13, #28 -; CHECK-NEXT: stur x8, [x19, #33] -; CHECK-NEXT: lsr x8, x12, #28 -; CHECK-NEXT: stur x10, [x19, #91] -; CHECK-NEXT: stur x27, [x19, #83] -; CHECK-NEXT: stur x11, [x19, #66] -; CHECK-NEXT: stur x26, [x19, #58] -; CHECK-NEXT: strb w9, [x19, #99] -; CHECK-NEXT: strb w8, [x19, #74] -; CHECK-NEXT: ldp x20, x19, [sp, #176] // 16-byte Folded Reload -; CHECK-NEXT: ldr d10, [sp, #64] // 8-byte Folded Reload -; CHECK-NEXT: ldp x22, x21, [sp, #160] // 16-byte Folded Reload -; CHECK-NEXT: ldp x24, x23, [sp, #144] // 16-byte Folded Reload -; CHECK-NEXT: ldp x26, x25, [sp, #128] // 16-byte Folded Reload -; CHECK-NEXT: ldp x28, x27, [sp, #112] // 16-byte Folded Reload -; CHECK-NEXT: ldp x29, x30, [sp, #96] // 16-byte Folded Reload -; CHECK-NEXT: ldp d9, d8, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: add sp, sp, #192 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v8f16_v8i100: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #192 +; CHECK-SD-NEXT: str d10, [sp, #64] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp d9, d8, [sp, #80] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x29, x30, [sp, #96] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x28, x27, [sp, #112] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x26, x25, [sp, #128] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x24, x23, [sp, #144] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x22, x21, [sp, #160] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #176] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 192 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w22, -32 +; CHECK-SD-NEXT: .cfi_offset w23, -40 +; CHECK-SD-NEXT: .cfi_offset w24, -48 +; CHECK-SD-NEXT: .cfi_offset w25, -56 +; CHECK-SD-NEXT: .cfi_offset w26, -64 +; CHECK-SD-NEXT: .cfi_offset w27, -72 +; CHECK-SD-NEXT: .cfi_offset w28, -80 +; CHECK-SD-NEXT: .cfi_offset w30, -88 +; CHECK-SD-NEXT: .cfi_offset w29, -96 +; CHECK-SD-NEXT: .cfi_offset b8, -104 +; CHECK-SD-NEXT: .cfi_offset b9, -112 +; 
CHECK-SD-NEXT: .cfi_offset b10, -128 +; CHECK-SD-NEXT: str q0, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: mov x19, x8 +; CHECK-SD-NEXT: str q0, [sp, #32] // 16-byte Folded Spill +; CHECK-SD-NEXT: mov h0, v0.h[1] +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: movi v10.2s, #241, lsl #24 +; CHECK-SD-NEXT: mov w8, #1895825407 // =0x70ffffff +; CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: fmov s9, w8 +; CHECK-SD-NEXT: mov x22, #-34359738368 // =0xfffffff800000000 +; CHECK-SD-NEXT: mov x23, #34359738367 // =0x7ffffffff +; CHECK-SD-NEXT: mov h0, v0.h[3] +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: csel x8, x22, x1, lt +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: csinv x9, x9, xzr, le +; CHECK-SD-NEXT: csel x8, x23, x8, gt +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: csel x8, xzr, x8, vs +; CHECK-SD-NEXT: str x8, [sp, #72] // 8-byte Folded Spill +; CHECK-SD-NEXT: csel x8, xzr, x9, vs +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: str x8, [sp, #24] // 8-byte Folded Spill +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x22, x1, lt +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: csel x9, x23, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: csel x10, xzr, x8, vs +; CHECK-SD-NEXT: csel x8, xzr, x9, vs +; CHECK-SD-NEXT: stp x8, x10, [sp, #8] // 16-byte Folded Spill +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov h0, v0.h[2] +; CHECK-SD-NEXT: csel x8, x22, x1, lt +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: csinv x9, x9, xzr, le +; CHECK-SD-NEXT: csel x8, x23, x8, gt +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: csel x26, xzr, x8, vs +; CHECK-SD-NEXT: csel x8, xzr, x9, vs +; CHECK-SD-NEXT: str x8, [sp, #32] // 8-byte Folded Spill +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov h0, v0.h[1] +; CHECK-SD-NEXT: csel x8, x22, x1, lt +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: csinv x9, x9, xzr, le +; CHECK-SD-NEXT: csel x8, x23, x8, gt +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: csel x27, xzr, x8, vs +; CHECK-SD-NEXT: csel x8, xzr, x9, vs +; CHECK-SD-NEXT: str x8, [sp] // 8-byte Folded Spill +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov h0, v0.h[3] +; CHECK-SD-NEXT: csel x8, x22, x1, lt +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: csinv x9, x9, xzr, le +; CHECK-SD-NEXT: csel x8, x23, x8, gt +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: csel x20, xzr, x8, vs +; CHECK-SD-NEXT: csel x21, xzr, x9, vs +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x8, 
xzr, x0, lt +; CHECK-SD-NEXT: csel x9, x22, x1, lt +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: csel x9, x23, x9, gt +; CHECK-SD-NEXT: csinv x8, x8, xzr, le +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: csel x28, xzr, x8, vs +; CHECK-SD-NEXT: csel x24, xzr, x9, vs +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov h0, v0.h[2] +; CHECK-SD-NEXT: csel x8, x22, x1, lt +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: csinv x9, x9, xzr, le +; CHECK-SD-NEXT: csel x8, x23, x8, gt +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: fcvt s8, h0 +; CHECK-SD-NEXT: csel x25, xzr, x8, vs +; CHECK-SD-NEXT: csel x29, xzr, x9, vs +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: bl __fixsfti +; CHECK-SD-NEXT: ldr x9, [sp] // 8-byte Folded Reload +; CHECK-SD-NEXT: extr x8, x24, x28, #28 +; CHECK-SD-NEXT: fcmp s8, s10 +; CHECK-SD-NEXT: bfi x25, x21, #36, #28 +; CHECK-SD-NEXT: lsr x11, x20, #28 +; CHECK-SD-NEXT: stur x9, [x19, #75] +; CHECK-SD-NEXT: extr x9, x20, x21, #28 +; CHECK-SD-NEXT: stur x8, [x19, #41] +; CHECK-SD-NEXT: csel x8, x22, x1, lt +; CHECK-SD-NEXT: str x9, [x19, #16] +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: ldr x10, [sp, #32] // 8-byte Folded Reload +; CHECK-SD-NEXT: stp x29, x25, [x19] +; CHECK-SD-NEXT: stur x10, [x19, #50] +; CHECK-SD-NEXT: lsr x10, x24, #28 +; CHECK-SD-NEXT: csinv x9, x9, xzr, le +; CHECK-SD-NEXT: csel x8, x23, x8, gt +; CHECK-SD-NEXT: fcmp s8, s8 +; CHECK-SD-NEXT: strb w10, [x19, #49] +; CHECK-SD-NEXT: ldp x14, x12, [sp, #8] // 16-byte Folded Reload +; CHECK-SD-NEXT: strb w11, [x19, #24] +; CHECK-SD-NEXT: csel x8, xzr, x8, vs +; CHECK-SD-NEXT: ldr x13, [sp, #24] // 8-byte Folded Reload +; CHECK-SD-NEXT: csel x9, xzr, x9, vs +; CHECK-SD-NEXT: bfi x8, x28, #36, #28 +; CHECK-SD-NEXT: extr x10, x14, x12, #28 +; CHECK-SD-NEXT: bfi x27, x12, #36, #28 +; CHECK-SD-NEXT: ldr x12, [sp, #72] // 8-byte Folded Reload +; CHECK-SD-NEXT: bfi x26, x13, #36, #28 +; CHECK-SD-NEXT: stur x9, [x19, #25] +; CHECK-SD-NEXT: lsr x9, x14, #28 +; CHECK-SD-NEXT: extr x11, x12, x13, #28 +; CHECK-SD-NEXT: stur x8, [x19, #33] +; CHECK-SD-NEXT: lsr x8, x12, #28 +; CHECK-SD-NEXT: stur x10, [x19, #91] +; CHECK-SD-NEXT: stur x27, [x19, #83] +; CHECK-SD-NEXT: stur x11, [x19, #66] +; CHECK-SD-NEXT: stur x26, [x19, #58] +; CHECK-SD-NEXT: strb w9, [x19, #99] +; CHECK-SD-NEXT: strb w8, [x19, #74] +; CHECK-SD-NEXT: ldp x20, x19, [sp, #176] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr d10, [sp, #64] // 8-byte Folded Reload +; CHECK-SD-NEXT: ldp x22, x21, [sp, #160] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x24, x23, [sp, #144] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x26, x25, [sp, #128] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x28, x27, [sp, #112] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x29, x30, [sp, #96] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp d9, d8, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: add sp, sp, #192 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v8f16_v8i100: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: mov h1, v0.h[1] +; CHECK-GI-CVT-NEXT: mov h2, v0.h[2] +; CHECK-GI-CVT-NEXT: mov x11, x8 +; CHECK-GI-CVT-NEXT: fcvt s3, h0 +; CHECK-GI-CVT-NEXT: mov h4, v0.h[3] +; CHECK-GI-CVT-NEXT: str wzr, [x8, #8] +; CHECK-GI-CVT-NEXT: strb wzr, [x8, #12] +; CHECK-GI-CVT-NEXT: fcvt s1, h1 +; CHECK-GI-CVT-NEXT: fcvt s2, h2 
+; CHECK-GI-CVT-NEXT: fcvtzs x9, s3 +; CHECK-GI-CVT-NEXT: fcvt s3, h4 +; CHECK-GI-CVT-NEXT: fcvtzs x10, s1 +; CHECK-GI-CVT-NEXT: mov h1, v0.h[4] +; CHECK-GI-CVT-NEXT: fcvtzs x12, s2 +; CHECK-GI-CVT-NEXT: mov h2, v0.h[5] +; CHECK-GI-CVT-NEXT: str x9, [x8] +; CHECK-GI-CVT-NEXT: mov x9, x8 +; CHECK-GI-CVT-NEXT: fcvt s1, h1 +; CHECK-GI-CVT-NEXT: str x10, [x11, #12]! +; CHECK-GI-CVT-NEXT: fcvtzs x10, s3 +; CHECK-GI-CVT-NEXT: mov h3, v0.h[6] +; CHECK-GI-CVT-NEXT: fcvt s2, h2 +; CHECK-GI-CVT-NEXT: mov h0, v0.h[7] +; CHECK-GI-CVT-NEXT: str wzr, [x11, #8] +; CHECK-GI-CVT-NEXT: strb wzr, [x11, #12] +; CHECK-GI-CVT-NEXT: mov x11, x8 +; CHECK-GI-CVT-NEXT: str x12, [x9, #25]! +; CHECK-GI-CVT-NEXT: fcvtzs x12, s1 +; CHECK-GI-CVT-NEXT: str wzr, [x9, #8] +; CHECK-GI-CVT-NEXT: fcvt s1, h3 +; CHECK-GI-CVT-NEXT: strb wzr, [x9, #12] +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: mov x9, x8 +; CHECK-GI-CVT-NEXT: str x10, [x11, #37]! +; CHECK-GI-CVT-NEXT: fcvtzs x10, s2 +; CHECK-GI-CVT-NEXT: str wzr, [x11, #8] +; CHECK-GI-CVT-NEXT: strb wzr, [x11, #12] +; CHECK-GI-CVT-NEXT: fcvtzs x11, s1 +; CHECK-GI-CVT-NEXT: str x12, [x9, #50]! +; CHECK-GI-CVT-NEXT: str wzr, [x9, #8] +; CHECK-GI-CVT-NEXT: strb wzr, [x9, #12] +; CHECK-GI-CVT-NEXT: mov x9, x8 +; CHECK-GI-CVT-NEXT: str x10, [x9, #62]! +; CHECK-GI-CVT-NEXT: fcvtzs x10, s0 +; CHECK-GI-CVT-NEXT: str wzr, [x9, #8] +; CHECK-GI-CVT-NEXT: strb wzr, [x9, #12] +; CHECK-GI-CVT-NEXT: mov x9, x8 +; CHECK-GI-CVT-NEXT: str x11, [x9, #75]! +; CHECK-GI-CVT-NEXT: str wzr, [x9, #8] +; CHECK-GI-CVT-NEXT: strb wzr, [x9, #12] +; CHECK-GI-CVT-NEXT: str x10, [x8, #87]! +; CHECK-GI-CVT-NEXT: str wzr, [x8, #8] +; CHECK-GI-CVT-NEXT: strb wzr, [x8, #12] +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_signed_v8f16_v8i100: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: mov h1, v0.h[1] +; CHECK-GI-FP16-NEXT: mov h2, v0.h[2] +; CHECK-GI-FP16-NEXT: mov x11, x8 +; CHECK-GI-FP16-NEXT: fcvtzs x9, h0 +; CHECK-GI-FP16-NEXT: str wzr, [x8, #8] +; CHECK-GI-FP16-NEXT: strb wzr, [x8, #12] +; CHECK-GI-FP16-NEXT: fcvtzs x10, h1 +; CHECK-GI-FP16-NEXT: mov h1, v0.h[3] +; CHECK-GI-FP16-NEXT: fcvtzs x12, h2 +; CHECK-GI-FP16-NEXT: mov h2, v0.h[4] +; CHECK-GI-FP16-NEXT: str x9, [x8] +; CHECK-GI-FP16-NEXT: mov x9, x8 +; CHECK-GI-FP16-NEXT: str x10, [x11, #12]! +; CHECK-GI-FP16-NEXT: fcvtzs x10, h1 +; CHECK-GI-FP16-NEXT: mov h1, v0.h[5] +; CHECK-GI-FP16-NEXT: str wzr, [x11, #8] +; CHECK-GI-FP16-NEXT: strb wzr, [x11, #12] +; CHECK-GI-FP16-NEXT: mov x11, x8 +; CHECK-GI-FP16-NEXT: str x12, [x9, #25]! +; CHECK-GI-FP16-NEXT: fcvtzs x12, h2 +; CHECK-GI-FP16-NEXT: str wzr, [x9, #8] +; CHECK-GI-FP16-NEXT: mov h2, v0.h[6] +; CHECK-GI-FP16-NEXT: mov h0, v0.h[7] +; CHECK-GI-FP16-NEXT: strb wzr, [x9, #12] +; CHECK-GI-FP16-NEXT: fcvtzs x9, h1 +; CHECK-GI-FP16-NEXT: str x10, [x11, #37]! +; CHECK-GI-FP16-NEXT: mov x10, x8 +; CHECK-GI-FP16-NEXT: str wzr, [x11, #8] +; CHECK-GI-FP16-NEXT: strb wzr, [x11, #12] +; CHECK-GI-FP16-NEXT: fcvtzs x11, h2 +; CHECK-GI-FP16-NEXT: str x12, [x10, #50]! +; CHECK-GI-FP16-NEXT: str wzr, [x10, #8] +; CHECK-GI-FP16-NEXT: strb wzr, [x10, #12] +; CHECK-GI-FP16-NEXT: mov x10, x8 +; CHECK-GI-FP16-NEXT: str x9, [x10, #62]! +; CHECK-GI-FP16-NEXT: fcvtzs x9, h0 +; CHECK-GI-FP16-NEXT: str wzr, [x10, #8] +; CHECK-GI-FP16-NEXT: strb wzr, [x10, #12] +; CHECK-GI-FP16-NEXT: mov x10, x8 +; CHECK-GI-FP16-NEXT: str x11, [x10, #75]! +; CHECK-GI-FP16-NEXT: str wzr, [x10, #8] +; CHECK-GI-FP16-NEXT: strb wzr, [x10, #12] +; CHECK-GI-FP16-NEXT: str x9, [x8, #87]! 
+; CHECK-GI-FP16-NEXT: str wzr, [x8, #8] +; CHECK-GI-FP16-NEXT: strb wzr, [x8, #12] +; CHECK-GI-FP16-NEXT: ret %x = call <8 x i100> @llvm.fptosi.sat.v8f16.v8i100(<8 x half> %f) ret <8 x i100> %x } @@ -2730,72 +4695,140 @@ declare <16 x i8> @llvm.fptosi.sat.v16f64.v16i8(<16 x double> %f) declare <16 x i16> @llvm.fptosi.sat.v16f64.v16i16(<16 x double> %f) define <8 x i8> @test_signed_v8f32_v8i8(<8 x float> %f) { -; CHECK-LABEL: test_signed_v8f32_v8i8: -; CHECK: // %bb.0: -; CHECK-NEXT: movi v2.4s, #127 -; CHECK-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: smin v1.4s, v1.4s, v2.4s -; CHECK-NEXT: smin v0.4s, v0.4s, v2.4s -; CHECK-NEXT: mvni v2.4s, #127 -; CHECK-NEXT: smax v1.4s, v1.4s, v2.4s -; CHECK-NEXT: smax v0.4s, v0.4s, v2.4s -; CHECK-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-NEXT: xtn v0.8b, v0.8h -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v8f32_v8i8: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: movi v2.4s, #127 +; CHECK-SD-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: smin v1.4s, v1.4s, v2.4s +; CHECK-SD-NEXT: smin v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: mvni v2.4s, #127 +; CHECK-SD-NEXT: smax v1.4s, v1.4s, v2.4s +; CHECK-SD-NEXT: smax v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v8f32_v8i8: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v2.4s, #127 +; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-GI-NEXT: smin v0.4s, v0.4s, v2.4s +; CHECK-GI-NEXT: smin v1.4s, v1.4s, v2.4s +; CHECK-GI-NEXT: mvni v2.4s, #127 +; CHECK-GI-NEXT: smax v0.4s, v0.4s, v2.4s +; CHECK-GI-NEXT: smax v1.4s, v1.4s, v2.4s +; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-GI-NEXT: ret %x = call <8 x i8> @llvm.fptosi.sat.v8f32.v8i8(<8 x float> %f) ret <8 x i8> %x } define <16 x i8> @test_signed_v16f32_v16i8(<16 x float> %f) { -; CHECK-LABEL: test_signed_v16f32_v16i8: -; CHECK: // %bb.0: -; CHECK-NEXT: movi v4.4s, #127 -; CHECK-NEXT: fcvtzs v3.4s, v3.4s -; CHECK-NEXT: fcvtzs v2.4s, v2.4s -; CHECK-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: mvni v5.4s, #127 -; CHECK-NEXT: smin v3.4s, v3.4s, v4.4s -; CHECK-NEXT: smin v2.4s, v2.4s, v4.4s -; CHECK-NEXT: smin v1.4s, v1.4s, v4.4s -; CHECK-NEXT: smin v0.4s, v0.4s, v4.4s -; CHECK-NEXT: smax v3.4s, v3.4s, v5.4s -; CHECK-NEXT: smax v2.4s, v2.4s, v5.4s -; CHECK-NEXT: smax v1.4s, v1.4s, v5.4s -; CHECK-NEXT: smax v0.4s, v0.4s, v5.4s -; CHECK-NEXT: uzp1 v2.8h, v2.8h, v3.8h -; CHECK-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-NEXT: uzp1 v0.16b, v0.16b, v2.16b -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v16f32_v16i8: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: movi v4.4s, #127 +; CHECK-SD-NEXT: fcvtzs v3.4s, v3.4s +; CHECK-SD-NEXT: fcvtzs v2.4s, v2.4s +; CHECK-SD-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: mvni v5.4s, #127 +; CHECK-SD-NEXT: smin v3.4s, v3.4s, v4.4s +; CHECK-SD-NEXT: smin v2.4s, v2.4s, v4.4s +; CHECK-SD-NEXT: smin v1.4s, v1.4s, v4.4s +; CHECK-SD-NEXT: smin v0.4s, v0.4s, v4.4s +; CHECK-SD-NEXT: smax v3.4s, v3.4s, v5.4s +; CHECK-SD-NEXT: smax v2.4s, v2.4s, v5.4s +; CHECK-SD-NEXT: smax v1.4s, v1.4s, v5.4s +; CHECK-SD-NEXT: smax v0.4s, v0.4s, v5.4s +; CHECK-SD-NEXT: uzp1 v2.8h, v2.8h, v3.8h +; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v2.16b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v16f32_v16i8: +; CHECK-GI: // 
%bb.0: +; CHECK-GI-NEXT: movi v4.4s, #127 +; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-GI-NEXT: fcvtzs v2.4s, v2.4s +; CHECK-GI-NEXT: fcvtzs v3.4s, v3.4s +; CHECK-GI-NEXT: mvni v5.4s, #127 +; CHECK-GI-NEXT: smin v0.4s, v0.4s, v4.4s +; CHECK-GI-NEXT: smin v1.4s, v1.4s, v4.4s +; CHECK-GI-NEXT: smin v2.4s, v2.4s, v4.4s +; CHECK-GI-NEXT: smin v3.4s, v3.4s, v4.4s +; CHECK-GI-NEXT: smax v0.4s, v0.4s, v5.4s +; CHECK-GI-NEXT: smax v1.4s, v1.4s, v5.4s +; CHECK-GI-NEXT: smax v2.4s, v2.4s, v5.4s +; CHECK-GI-NEXT: smax v3.4s, v3.4s, v5.4s +; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: uzp1 v1.8h, v2.8h, v3.8h +; CHECK-GI-NEXT: uzp1 v0.16b, v0.16b, v1.16b +; CHECK-GI-NEXT: ret %x = call <16 x i8> @llvm.fptosi.sat.v16f32.v16i8(<16 x float> %f) ret <16 x i8> %x } define <8 x i16> @test_signed_v8f32_v8i16(<8 x float> %f) { -; CHECK-LABEL: test_signed_v8f32_v8i16: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-NEXT: sqxtn v0.4h, v0.4s -; CHECK-NEXT: sqxtn2 v0.8h, v1.4s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v8f32_v8i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-SD-NEXT: sqxtn v0.4h, v0.4s +; CHECK-SD-NEXT: sqxtn2 v0.8h, v1.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v8f32_v8i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v2.4s, #127, msl #8 +; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-GI-NEXT: smin v0.4s, v0.4s, v2.4s +; CHECK-GI-NEXT: smin v1.4s, v1.4s, v2.4s +; CHECK-GI-NEXT: mvni v2.4s, #127, msl #8 +; CHECK-GI-NEXT: smax v0.4s, v0.4s, v2.4s +; CHECK-GI-NEXT: smax v1.4s, v1.4s, v2.4s +; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: ret %x = call <8 x i16> @llvm.fptosi.sat.v8f32.v8i16(<8 x float> %f) ret <8 x i16> %x } define <16 x i16> @test_signed_v16f32_v16i16(<16 x float> %f) { -; CHECK-LABEL: test_signed_v16f32_v16i16: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-NEXT: fcvtzs v2.4s, v2.4s -; CHECK-NEXT: fcvtzs v4.4s, v1.4s -; CHECK-NEXT: sqxtn v0.4h, v0.4s -; CHECK-NEXT: sqxtn v1.4h, v2.4s -; CHECK-NEXT: fcvtzs v2.4s, v3.4s -; CHECK-NEXT: sqxtn2 v0.8h, v4.4s -; CHECK-NEXT: sqxtn2 v1.8h, v2.4s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_signed_v16f32_v16i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-NEXT: fcvtzs v2.4s, v2.4s +; CHECK-SD-NEXT: fcvtzs v4.4s, v1.4s +; CHECK-SD-NEXT: sqxtn v0.4h, v0.4s +; CHECK-SD-NEXT: sqxtn v1.4h, v2.4s +; CHECK-SD-NEXT: fcvtzs v2.4s, v3.4s +; CHECK-SD-NEXT: sqxtn2 v0.8h, v4.4s +; CHECK-SD-NEXT: sqxtn2 v1.8h, v2.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_signed_v16f32_v16i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v4.4s, #127, msl #8 +; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-GI-NEXT: fcvtzs v2.4s, v2.4s +; CHECK-GI-NEXT: fcvtzs v3.4s, v3.4s +; CHECK-GI-NEXT: mvni v5.4s, #127, msl #8 +; CHECK-GI-NEXT: smin v0.4s, v0.4s, v4.4s +; CHECK-GI-NEXT: smin v1.4s, v1.4s, v4.4s +; CHECK-GI-NEXT: smin v2.4s, v2.4s, v4.4s +; CHECK-GI-NEXT: smin v3.4s, v3.4s, v4.4s +; CHECK-GI-NEXT: smax v0.4s, v0.4s, v5.4s +; CHECK-GI-NEXT: smax v1.4s, v1.4s, v5.4s +; CHECK-GI-NEXT: smax v2.4s, v2.4s, v5.4s +; CHECK-GI-NEXT: smax v3.4s, v3.4s, v5.4s +; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: uzp1 v1.8h, v2.8h, v3.8h +; CHECK-GI-NEXT: ret %x = call <16 x i16> @llvm.fptosi.sat.v16f32.v16i16(<16 x float> %f) ret <16 x i16> %x } 
@@ -2803,449 +4836,689 @@ define <16 x i16> @test_signed_v16f32_v16i16(<16 x float> %f) { define <16 x i8> @test_signed_v16f16_v16i8(<16 x half> %f) { -; CHECK-CVT-LABEL: test_signed_v16f16_v16i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl2 v4.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: movi v2.4s, #127 -; CHECK-CVT-NEXT: fcvtzs v3.4s, v3.4s -; CHECK-CVT-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtzs v4.4s, v4.4s -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: smin v3.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: smin v1.4s, v1.4s, v2.4s -; CHECK-CVT-NEXT: smin v4.4s, v4.4s, v2.4s -; CHECK-CVT-NEXT: smin v0.4s, v0.4s, v2.4s -; CHECK-CVT-NEXT: mvni v2.4s, #127 -; CHECK-CVT-NEXT: smax v3.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: smax v1.4s, v1.4s, v2.4s -; CHECK-CVT-NEXT: smax v4.4s, v4.4s, v2.4s -; CHECK-CVT-NEXT: smax v0.4s, v0.4s, v2.4s -; CHECK-CVT-NEXT: uzp1 v1.8h, v1.8h, v3.8h -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v4.8h -; CHECK-CVT-NEXT: uzp1 v0.16b, v0.16b, v1.16b -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_signed_v16f16_v16i8: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs v0.8h, v0.8h -; CHECK-FP16-NEXT: fcvtzs v1.8h, v1.8h -; CHECK-FP16-NEXT: sqxtn v0.8b, v0.8h -; CHECK-FP16-NEXT: sqxtn2 v0.16b, v1.8h -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_signed_v16f16_v16i8: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvtl2 v3.4s, v1.8h +; CHECK-SD-CVT-NEXT: fcvtl v1.4s, v1.4h +; CHECK-SD-CVT-NEXT: fcvtl2 v4.4s, v0.8h +; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h +; CHECK-SD-CVT-NEXT: movi v2.4s, #127 +; CHECK-SD-CVT-NEXT: fcvtzs v3.4s, v3.4s +; CHECK-SD-CVT-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-SD-CVT-NEXT: fcvtzs v4.4s, v4.4s +; CHECK-SD-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-SD-CVT-NEXT: smin v3.4s, v3.4s, v2.4s +; CHECK-SD-CVT-NEXT: smin v1.4s, v1.4s, v2.4s +; CHECK-SD-CVT-NEXT: smin v4.4s, v4.4s, v2.4s +; CHECK-SD-CVT-NEXT: smin v0.4s, v0.4s, v2.4s +; CHECK-SD-CVT-NEXT: mvni v2.4s, #127 +; CHECK-SD-CVT-NEXT: smax v3.4s, v3.4s, v2.4s +; CHECK-SD-CVT-NEXT: smax v1.4s, v1.4s, v2.4s +; CHECK-SD-CVT-NEXT: smax v4.4s, v4.4s, v2.4s +; CHECK-SD-CVT-NEXT: smax v0.4s, v0.4s, v2.4s +; CHECK-SD-CVT-NEXT: uzp1 v1.8h, v1.8h, v3.8h +; CHECK-SD-CVT-NEXT: uzp1 v0.8h, v0.8h, v4.8h +; CHECK-SD-CVT-NEXT: uzp1 v0.16b, v0.16b, v1.16b +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_signed_v16f16_v16i8: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzs v0.8h, v0.8h +; CHECK-SD-FP16-NEXT: fcvtzs v1.8h, v1.8h +; CHECK-SD-FP16-NEXT: sqxtn v0.8b, v0.8h +; CHECK-SD-FP16-NEXT: sqxtn2 v0.16b, v1.8h +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_signed_v16f16_v16i8: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvtl v3.4s, v0.4h +; CHECK-GI-CVT-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-GI-CVT-NEXT: fcvtl v4.4s, v1.4h +; CHECK-GI-CVT-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-GI-CVT-NEXT: movi v2.4s, #127 +; CHECK-GI-CVT-NEXT: mvni v5.4s, #127 +; CHECK-GI-CVT-NEXT: fcvtzs v3.4s, v3.4s +; CHECK-GI-CVT-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-GI-CVT-NEXT: fcvtzs v4.4s, v4.4s +; CHECK-GI-CVT-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-GI-CVT-NEXT: smin v3.4s, v3.4s, v2.4s +; CHECK-GI-CVT-NEXT: smin v0.4s, v0.4s, v2.4s +; CHECK-GI-CVT-NEXT: smin v4.4s, v4.4s, v2.4s +; CHECK-GI-CVT-NEXT: smin v1.4s, v1.4s, v2.4s +; CHECK-GI-CVT-NEXT: smax v2.4s, v3.4s, v5.4s +; CHECK-GI-CVT-NEXT: smax v0.4s, v0.4s, v5.4s +; CHECK-GI-CVT-NEXT: smax v3.4s, v4.4s, v5.4s +; CHECK-GI-CVT-NEXT: smax v1.4s, v1.4s, v5.4s 
+; CHECK-GI-CVT-NEXT: uzp1 v0.8h, v2.8h, v0.8h
+; CHECK-GI-CVT-NEXT: uzp1 v1.8h, v3.8h, v1.8h
+; CHECK-GI-CVT-NEXT: uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_signed_v16f16_v16i8:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: movi v2.8h, #127
+; CHECK-GI-FP16-NEXT: fcvtzs v0.8h, v0.8h
+; CHECK-GI-FP16-NEXT: fcvtzs v1.8h, v1.8h
+; CHECK-GI-FP16-NEXT: mvni v3.8h, #127
+; CHECK-GI-FP16-NEXT: smin v0.8h, v0.8h, v2.8h
+; CHECK-GI-FP16-NEXT: smin v1.8h, v1.8h, v2.8h
+; CHECK-GI-FP16-NEXT: smax v0.8h, v0.8h, v3.8h
+; CHECK-GI-FP16-NEXT: smax v1.8h, v1.8h, v3.8h
+; CHECK-GI-FP16-NEXT: uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-GI-FP16-NEXT: ret
%x = call <16 x i8> @llvm.fptosi.sat.v16f16.v16i8(<16 x half> %f)
ret <16 x i8> %x
}

define <16 x i16> @test_signed_v16f16_v16i16(<16 x half> %f) {
-; CHECK-CVT-LABEL: test_signed_v16f16_v16i16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v2.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtl v3.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl2 v4.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl2 v5.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtzs v2.4s, v2.4s
-; CHECK-CVT-NEXT: fcvtzs v1.4s, v3.4s
-; CHECK-CVT-NEXT: fcvtzs v3.4s, v5.4s
-; CHECK-CVT-NEXT: sqxtn v0.4h, v2.4s
-; CHECK-CVT-NEXT: fcvtzs v2.4s, v4.4s
-; CHECK-CVT-NEXT: sqxtn v1.4h, v1.4s
-; CHECK-CVT-NEXT: sqxtn2 v0.8h, v2.4s
-; CHECK-CVT-NEXT: sqxtn2 v1.8h, v3.4s
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_signed_v16f16_v16i16:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzs v0.8h, v0.8h
-; CHECK-FP16-NEXT: fcvtzs v1.8h, v1.8h
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_signed_v16f16_v16i16:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-SD-CVT-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-SD-CVT-NEXT: fcvtl2 v4.4s, v0.8h
+; CHECK-SD-CVT-NEXT: fcvtl2 v5.4s, v1.8h
+; CHECK-SD-CVT-NEXT: fcvtzs v2.4s, v2.4s
+; CHECK-SD-CVT-NEXT: fcvtzs v1.4s, v3.4s
+; CHECK-SD-CVT-NEXT: fcvtzs v3.4s, v5.4s
+; CHECK-SD-CVT-NEXT: sqxtn v0.4h, v2.4s
+; CHECK-SD-CVT-NEXT: fcvtzs v2.4s, v4.4s
+; CHECK-SD-CVT-NEXT: sqxtn v1.4h, v1.4s
+; CHECK-SD-CVT-NEXT: sqxtn2 v0.8h, v2.4s
+; CHECK-SD-CVT-NEXT: sqxtn2 v1.8h, v3.4s
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_signed_v16f16_v16i16:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: fcvtzs v0.8h, v0.8h
+; CHECK-SD-FP16-NEXT: fcvtzs v1.8h, v1.8h
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_signed_v16f16_v16i16:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvtl v3.4s, v0.4h
+; CHECK-GI-CVT-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-GI-CVT-NEXT: fcvtl v4.4s, v1.4h
+; CHECK-GI-CVT-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-GI-CVT-NEXT: movi v2.4s, #127, msl #8
+; CHECK-GI-CVT-NEXT: mvni v5.4s, #127, msl #8
+; CHECK-GI-CVT-NEXT: fcvtzs v3.4s, v3.4s
+; CHECK-GI-CVT-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-CVT-NEXT: fcvtzs v4.4s, v4.4s
+; CHECK-GI-CVT-NEXT: fcvtzs v1.4s, v1.4s
+; CHECK-GI-CVT-NEXT: smin v3.4s, v3.4s, v2.4s
+; CHECK-GI-CVT-NEXT: smin v0.4s, v0.4s, v2.4s
+; CHECK-GI-CVT-NEXT: smin v4.4s, v4.4s, v2.4s
+; CHECK-GI-CVT-NEXT: smin v1.4s, v1.4s, v2.4s
+; CHECK-GI-CVT-NEXT: smax v2.4s, v3.4s, v5.4s
+; CHECK-GI-CVT-NEXT: smax v0.4s, v0.4s, v5.4s
+; CHECK-GI-CVT-NEXT: smax v3.4s, v4.4s, v5.4s
+; CHECK-GI-CVT-NEXT: smax v1.4s, v1.4s, v5.4s
+; CHECK-GI-CVT-NEXT: uzp1 v0.8h, v2.8h, v0.8h
+; CHECK-GI-CVT-NEXT: uzp1 v1.8h, v3.8h, v1.8h
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_signed_v16f16_v16i16:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzs v0.8h, v0.8h
+; CHECK-GI-FP16-NEXT: fcvtzs v1.8h, v1.8h
+; CHECK-GI-FP16-NEXT: ret
%x = call <16 x i16> @llvm.fptosi.sat.v16f16.v16i16(<16 x half> %f)
ret <16 x i16> %x
}

define <8 x i8> @test_signed_v8f64_v8i8(<8 x double> %f) {
-; CHECK-LABEL: test_signed_v8f64_v8i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov d4, v3.d[1]
-; CHECK-NEXT: fcvtzs w11, d3
-; CHECK-NEXT: mov w9, #127 // =0x7f
-; CHECK-NEXT: mov d3, v1.d[1]
-; CHECK-NEXT: fcvtzs w13, d2
-; CHECK-NEXT: fcvtzs w15, d1
-; CHECK-NEXT: fcvtzs w17, d0
-; CHECK-NEXT: fcvtzs w8, d4
-; CHECK-NEXT: mov d4, v2.d[1]
-; CHECK-NEXT: mov d2, v0.d[1]
-; CHECK-NEXT: fcvtzs w14, d3
-; CHECK-NEXT: cmp w8, #127
-; CHECK-NEXT: fcvtzs w12, d4
-; CHECK-NEXT: fcvtzs w16, d2
-; CHECK-NEXT: csel w10, w8, w9, lt
-; CHECK-NEXT: mov w8, #-128 // =0xffffff80
-; CHECK-NEXT: cmn w10, #128
-; CHECK-NEXT: csel w10, w10, w8, gt
-; CHECK-NEXT: cmp w11, #127
-; CHECK-NEXT: csel w11, w11, w9, lt
-; CHECK-NEXT: cmn w11, #128
-; CHECK-NEXT: csel w11, w11, w8, gt
-; CHECK-NEXT: cmp w12, #127
-; CHECK-NEXT: csel w12, w12, w9, lt
-; CHECK-NEXT: fmov s3, w11
-; CHECK-NEXT: cmn w12, #128
-; CHECK-NEXT: csel w12, w12, w8, gt
-; CHECK-NEXT: cmp w13, #127
-; CHECK-NEXT: csel w13, w13, w9, lt
-; CHECK-NEXT: mov v3.s[1], w10
-; CHECK-NEXT: cmn w13, #128
-; CHECK-NEXT: csel w13, w13, w8, gt
-; CHECK-NEXT: cmp w14, #127
-; CHECK-NEXT: csel w14, w14, w9, lt
-; CHECK-NEXT: fmov s2, w13
-; CHECK-NEXT: cmn w14, #128
-; CHECK-NEXT: csel w14, w14, w8, gt
-; CHECK-NEXT: cmp w15, #127
-; CHECK-NEXT: csel w15, w15, w9, lt
-; CHECK-NEXT: mov v2.s[1], w12
-; CHECK-NEXT: cmn w15, #128
-; CHECK-NEXT: csel w15, w15, w8, gt
-; CHECK-NEXT: cmp w16, #127
-; CHECK-NEXT: csel w11, w16, w9, lt
-; CHECK-NEXT: fmov s1, w15
-; CHECK-NEXT: cmn w11, #128
-; CHECK-NEXT: csel w10, w11, w8, gt
-; CHECK-NEXT: cmp w17, #127
-; CHECK-NEXT: csel w9, w17, w9, lt
-; CHECK-NEXT: mov v1.s[1], w14
-; CHECK-NEXT: cmn w9, #128
-; CHECK-NEXT: csel w8, w9, w8, gt
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: adrp x8, .LCPI82_0
-; CHECK-NEXT: ldr d4, [x8, :lo12:.LCPI82_0]
-; CHECK-NEXT: mov v0.s[1], w10
-; CHECK-NEXT: tbl v0.8b, { v0.16b, v1.16b, v2.16b, v3.16b }, v4.8b
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_signed_v8f64_v8i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov d4, v3.d[1]
+; CHECK-SD-NEXT: fcvtzs w11, d3
+; CHECK-SD-NEXT: mov w9, #127 // =0x7f
+; CHECK-SD-NEXT: mov d3, v1.d[1]
+; CHECK-SD-NEXT: fcvtzs w13, d2
+; CHECK-SD-NEXT: fcvtzs w15, d1
+; CHECK-SD-NEXT: fcvtzs w17, d0
+; CHECK-SD-NEXT: fcvtzs w8, d4
+; CHECK-SD-NEXT: mov d4, v2.d[1]
+; CHECK-SD-NEXT: mov d2, v0.d[1]
+; CHECK-SD-NEXT: fcvtzs w14, d3
+; CHECK-SD-NEXT: cmp w8, #127
+; CHECK-SD-NEXT: fcvtzs w12, d4
+; CHECK-SD-NEXT: fcvtzs w16, d2
+; CHECK-SD-NEXT: csel w10, w8, w9, lt
+; CHECK-SD-NEXT: mov w8, #-128 // =0xffffff80
+; CHECK-SD-NEXT: cmn w10, #128
+; CHECK-SD-NEXT: csel w10, w10, w8, gt
+; CHECK-SD-NEXT: cmp w11, #127
+; CHECK-SD-NEXT: csel w11, w11, w9, lt
+; CHECK-SD-NEXT: cmn w11, #128
+; CHECK-SD-NEXT: csel w11, w11, w8, gt
+; CHECK-SD-NEXT: cmp w12, #127
+; CHECK-SD-NEXT: csel w12, w12, w9, lt
+; CHECK-SD-NEXT: fmov s3, w11
+; CHECK-SD-NEXT: cmn w12, #128
+; CHECK-SD-NEXT: csel w12, w12, w8, gt
+; CHECK-SD-NEXT: cmp w13, #127
+; CHECK-SD-NEXT: csel w13, w13, w9, lt
+; CHECK-SD-NEXT: mov v3.s[1], w10
+; CHECK-SD-NEXT: cmn w13, #128
+; CHECK-SD-NEXT: csel w13, w13, w8, gt
+; CHECK-SD-NEXT: cmp w14, #127
+; CHECK-SD-NEXT: csel w14, w14, w9, lt
+; CHECK-SD-NEXT: fmov s2, w13
+; CHECK-SD-NEXT: cmn w14, #128
+; CHECK-SD-NEXT: csel w14, w14, w8, gt
+; CHECK-SD-NEXT: cmp w15, #127
+; CHECK-SD-NEXT: csel w15, w15, w9, lt
+; CHECK-SD-NEXT: mov v2.s[1], w12
+; CHECK-SD-NEXT: cmn w15, #128
+; CHECK-SD-NEXT: csel w15, w15, w8, gt
+; CHECK-SD-NEXT: cmp w16, #127
+; CHECK-SD-NEXT: csel w11, w16, w9, lt
+; CHECK-SD-NEXT: fmov s1, w15
+; CHECK-SD-NEXT: cmn w11, #128
+; CHECK-SD-NEXT: csel w10, w11, w8, gt
+; CHECK-SD-NEXT: cmp w17, #127
+; CHECK-SD-NEXT: csel w9, w17, w9, lt
+; CHECK-SD-NEXT: mov v1.s[1], w14
+; CHECK-SD-NEXT: cmn w9, #128
+; CHECK-SD-NEXT: csel w8, w9, w8, gt
+; CHECK-SD-NEXT: fmov s0, w8
+; CHECK-SD-NEXT: adrp x8, .LCPI82_0
+; CHECK-SD-NEXT: ldr d4, [x8, :lo12:.LCPI82_0]
+; CHECK-SD-NEXT: mov v0.s[1], w10
+; CHECK-SD-NEXT: tbl v0.8b, { v0.16b, v1.16b, v2.16b, v3.16b }, v4.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_signed_v8f64_v8i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d
+; CHECK-GI-NEXT: fcvtzs v1.2d, v1.2d
+; CHECK-GI-NEXT: adrp x8, .LCPI82_1
+; CHECK-GI-NEXT: fcvtzs v2.2d, v2.2d
+; CHECK-GI-NEXT: fcvtzs v3.2d, v3.2d
+; CHECK-GI-NEXT: ldr q4, [x8, :lo12:.LCPI82_1]
+; CHECK-GI-NEXT: adrp x8, .LCPI82_0
+; CHECK-GI-NEXT: cmgt v5.2d, v4.2d, v0.2d
+; CHECK-GI-NEXT: cmgt v6.2d, v4.2d, v1.2d
+; CHECK-GI-NEXT: cmgt v7.2d, v4.2d, v2.2d
+; CHECK-GI-NEXT: cmgt v16.2d, v4.2d, v3.2d
+; CHECK-GI-NEXT: bif v0.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT: bif v1.16b, v4.16b, v6.16b
+; CHECK-GI-NEXT: bif v2.16b, v4.16b, v7.16b
+; CHECK-GI-NEXT: bif v3.16b, v4.16b, v16.16b
+; CHECK-GI-NEXT: ldr q4, [x8, :lo12:.LCPI82_0]
+; CHECK-GI-NEXT: cmgt v5.2d, v0.2d, v4.2d
+; CHECK-GI-NEXT: cmgt v6.2d, v1.2d, v4.2d
+; CHECK-GI-NEXT: cmgt v7.2d, v2.2d, v4.2d
+; CHECK-GI-NEXT: cmgt v16.2d, v3.2d, v4.2d
+; CHECK-GI-NEXT: bif v0.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT: bif v1.16b, v4.16b, v6.16b
+; CHECK-GI-NEXT: bif v2.16b, v4.16b, v7.16b
+; CHECK-GI-NEXT: bif v3.16b, v4.16b, v16.16b
+; CHECK-GI-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: uzp1 v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-GI-NEXT: ret
%x = call <8 x i8> @llvm.fptosi.sat.v8f64.v8i8(<8 x double> %f)
ret <8 x i8> %x
}

define <16 x i8> @test_signed_v16f64_v16i8(<16 x double> %f) {
-; CHECK-LABEL: test_signed_v16f64_v16i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov d16, v0.d[1]
-; CHECK-NEXT: fcvtzs w10, d0
-; CHECK-NEXT: mov w8, #127 // =0x7f
-; CHECK-NEXT: mov d0, v1.d[1]
-; CHECK-NEXT: fcvtzs w13, d1
-; CHECK-NEXT: mov d1, v2.d[1]
-; CHECK-NEXT: fcvtzs w9, d16
-; CHECK-NEXT: fcvtzs w12, d0
-; CHECK-NEXT: cmp w9, #127
-; CHECK-NEXT: csel w11, w9, w8, lt
-; CHECK-NEXT: mov w9, #-128 // =0xffffff80
-; CHECK-NEXT: cmn w11, #128
-; CHECK-NEXT: csel w11, w11, w9, gt
-; CHECK-NEXT: cmp w10, #127
-; CHECK-NEXT: csel w10, w10, w8, lt
-; CHECK-NEXT: cmn w10, #128
-; CHECK-NEXT: csel w10, w10, w9, gt
-; CHECK-NEXT: cmp w12, #127
-; CHECK-NEXT: fmov s0, w10
-; CHECK-NEXT: csel w10, w12, w8, lt
-; CHECK-NEXT: cmn w10, #128
-; CHECK-NEXT: csel w10, w10, w9, gt
-; CHECK-NEXT: cmp w13, #127
-; CHECK-NEXT: csel w12, w13, w8, lt
-; CHECK-NEXT: mov v0.s[1], w11
-; CHECK-NEXT: fcvtzs w11, d1
-; CHECK-NEXT: cmn w12, #128
-; CHECK-NEXT: csel w12, w12, w9, gt
-; CHECK-NEXT: fmov s1, w12
-; CHECK-NEXT: fcvtzs w12, d2
-; CHECK-NEXT: mov d2, v3.d[1]
-; CHECK-NEXT: cmp w11, #127
-; CHECK-NEXT: mov w13, v0.s[1]
-; CHECK-NEXT: mov v1.s[1], w10
-; CHECK-NEXT: csel w10, w11, w8, lt
-; CHECK-NEXT: cmn w10, #128
-; CHECK-NEXT: fcvtzs w11, d2
-; CHECK-NEXT: csel w10, w10, w9, gt
-; CHECK-NEXT: cmp w12, #127
-; CHECK-NEXT: mov v0.b[1], w13
-; CHECK-NEXT: csel w12, w12, w8, lt
-; CHECK-NEXT: cmn w12, #128
-; CHECK-NEXT: mov w13, v1.s[1]
-; CHECK-NEXT: csel w12, w12, w9, gt
-; CHECK-NEXT: cmp w11, #127
-; CHECK-NEXT: fmov s2, w12
-; CHECK-NEXT: fcvtzs w12, d3
-; CHECK-NEXT: mov d3, v4.d[1]
-; CHECK-NEXT: mov v0.b[2], v1.b[0]
-; CHECK-NEXT: mov v2.s[1], w10
-; CHECK-NEXT: csel w10, w11, w8, lt
-; CHECK-NEXT: cmn w10, #128
-; CHECK-NEXT: fcvtzs w11, d3
-; CHECK-NEXT: csel w10, w10, w9, gt
-; CHECK-NEXT: cmp w12, #127
-; CHECK-NEXT: mov v0.b[3], w13
-; CHECK-NEXT: csel w12, w12, w8, lt
-; CHECK-NEXT: cmn w12, #128
-; CHECK-NEXT: mov w13, v2.s[1]
-; CHECK-NEXT: csel w12, w12, w9, gt
-; CHECK-NEXT: cmp w11, #127
-; CHECK-NEXT: fmov s3, w12
-; CHECK-NEXT: fcvtzs w12, d4
-; CHECK-NEXT: mov v0.b[4], v2.b[0]
-; CHECK-NEXT: mov d4, v5.d[1]
-; CHECK-NEXT: mov v3.s[1], w10
-; CHECK-NEXT: csel w10, w11, w8, lt
-; CHECK-NEXT: cmn w10, #128
-; CHECK-NEXT: mov v0.b[5], w13
-; CHECK-NEXT: csel w10, w10, w9, gt
-; CHECK-NEXT: cmp w12, #127
-; CHECK-NEXT: fcvtzs w11, d4
-; CHECK-NEXT: csel w12, w12, w8, lt
-; CHECK-NEXT: cmn w12, #128
-; CHECK-NEXT: mov w13, v3.s[1]
-; CHECK-NEXT: csel w12, w12, w9, gt
-; CHECK-NEXT: mov v0.b[6], v3.b[0]
-; CHECK-NEXT: fmov s4, w12
-; CHECK-NEXT: fcvtzs w12, d5
-; CHECK-NEXT: cmp w11, #127
-; CHECK-NEXT: mov d5, v6.d[1]
-; CHECK-NEXT: mov v4.s[1], w10
-; CHECK-NEXT: csel w10, w11, w8, lt
-; CHECK-NEXT: mov v0.b[7], w13
-; CHECK-NEXT: cmn w10, #128
-; CHECK-NEXT: csel w10, w10, w9, gt
-; CHECK-NEXT: cmp w12, #127
-; CHECK-NEXT: fcvtzs w13, d5
-; CHECK-NEXT: csel w11, w12, w8, lt
-; CHECK-NEXT: cmn w11, #128
-; CHECK-NEXT: mov w12, v4.s[1]
-; CHECK-NEXT: mov v0.b[8], v4.b[0]
-; CHECK-NEXT: csel w11, w11, w9, gt
-; CHECK-NEXT: fmov s5, w11
-; CHECK-NEXT: fcvtzs w11, d6
-; CHECK-NEXT: cmp w13, #127
-; CHECK-NEXT: mov d6, v7.d[1]
-; CHECK-NEXT: mov v0.b[9], w12
-; CHECK-NEXT: mov v5.s[1], w10
-; CHECK-NEXT: csel w10, w13, w8, lt
-; CHECK-NEXT: cmn w10, #128
-; CHECK-NEXT: csel w10, w10, w9, gt
-; CHECK-NEXT: cmp w11, #127
-; CHECK-NEXT: fcvtzs w13, d6
-; CHECK-NEXT: csel w11, w11, w8, lt
-; CHECK-NEXT: cmn w11, #128
-; CHECK-NEXT: mov v0.b[10], v5.b[0]
-; CHECK-NEXT: mov w12, v5.s[1]
-; CHECK-NEXT: csel w11, w11, w9, gt
-; CHECK-NEXT: fmov s6, w11
-; CHECK-NEXT: fcvtzs w11, d7
-; CHECK-NEXT: cmp w13, #127
-; CHECK-NEXT: mov v0.b[11], w12
-; CHECK-NEXT: mov v6.s[1], w10
-; CHECK-NEXT: csel w10, w13, w8, lt
-; CHECK-NEXT: cmn w10, #128
-; CHECK-NEXT: csel w10, w10, w9, gt
-; CHECK-NEXT: cmp w11, #127
-; CHECK-NEXT: csel w8, w11, w8, lt
-; CHECK-NEXT: cmn w8, #128
-; CHECK-NEXT: mov v0.b[12], v6.b[0]
-; CHECK-NEXT: mov w11, v6.s[1]
-; CHECK-NEXT: csel w8, w8, w9, gt
-; CHECK-NEXT: fmov s7, w8
-; CHECK-NEXT: mov v0.b[13], w11
-; CHECK-NEXT: mov v7.s[1], w10
-; CHECK-NEXT: mov v0.b[14], v7.b[0]
-; CHECK-NEXT: mov w8, v7.s[1]
-; CHECK-NEXT: mov v0.b[15], w8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_signed_v16f64_v16i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov d16, v0.d[1]
+; CHECK-SD-NEXT: fcvtzs w10, d0
+; CHECK-SD-NEXT: mov w8, #127 // =0x7f
+; CHECK-SD-NEXT: mov d0, v1.d[1]
+; CHECK-SD-NEXT: fcvtzs w13, d1
+; CHECK-SD-NEXT: mov d1, v2.d[1]
+; CHECK-SD-NEXT: fcvtzs w9, d16
+; CHECK-SD-NEXT: fcvtzs w12, d0
+; CHECK-SD-NEXT: cmp w9, #127
+; CHECK-SD-NEXT: csel w11, w9, w8, lt
+; CHECK-SD-NEXT: mov w9, #-128 // =0xffffff80
+; CHECK-SD-NEXT: cmn w11, #128
+; CHECK-SD-NEXT: csel w11, w11, w9, gt
+; CHECK-SD-NEXT: cmp w10, #127
+; CHECK-SD-NEXT: csel w10, w10, w8, lt
+; CHECK-SD-NEXT: cmn w10, #128
+; CHECK-SD-NEXT: csel w10, w10, w9, gt
+; CHECK-SD-NEXT: cmp w12, #127
+; CHECK-SD-NEXT: fmov s0, w10
+; CHECK-SD-NEXT: csel w10, w12, w8, lt
+; CHECK-SD-NEXT: cmn w10, #128
+; CHECK-SD-NEXT: csel w10, w10, w9, gt
+; CHECK-SD-NEXT: cmp w13, #127
+; CHECK-SD-NEXT: csel w12, w13, w8, lt
+; CHECK-SD-NEXT: mov v0.s[1], w11
+; CHECK-SD-NEXT: fcvtzs w11, d1
+; CHECK-SD-NEXT: cmn w12, #128
+; CHECK-SD-NEXT: csel w12, w12, w9, gt
+; CHECK-SD-NEXT: fmov s1, w12
+; CHECK-SD-NEXT: fcvtzs w12, d2
+; CHECK-SD-NEXT: mov d2, v3.d[1]
+; CHECK-SD-NEXT: cmp w11, #127
+; CHECK-SD-NEXT: mov w13, v0.s[1]
+; CHECK-SD-NEXT: mov v1.s[1], w10
+; CHECK-SD-NEXT: csel w10, w11, w8, lt
+; CHECK-SD-NEXT: cmn w10, #128
+; CHECK-SD-NEXT: fcvtzs w11, d2
+; CHECK-SD-NEXT: csel w10, w10, w9, gt
+; CHECK-SD-NEXT: cmp w12, #127
+; CHECK-SD-NEXT: mov v0.b[1], w13
+; CHECK-SD-NEXT: csel w12, w12, w8, lt
+; CHECK-SD-NEXT: cmn w12, #128
+; CHECK-SD-NEXT: mov w13, v1.s[1]
+; CHECK-SD-NEXT: csel w12, w12, w9, gt
+; CHECK-SD-NEXT: cmp w11, #127
+; CHECK-SD-NEXT: fmov s2, w12
+; CHECK-SD-NEXT: fcvtzs w12, d3
+; CHECK-SD-NEXT: mov d3, v4.d[1]
+; CHECK-SD-NEXT: mov v0.b[2], v1.b[0]
+; CHECK-SD-NEXT: mov v2.s[1], w10
+; CHECK-SD-NEXT: csel w10, w11, w8, lt
+; CHECK-SD-NEXT: cmn w10, #128
+; CHECK-SD-NEXT: fcvtzs w11, d3
+; CHECK-SD-NEXT: csel w10, w10, w9, gt
+; CHECK-SD-NEXT: cmp w12, #127
+; CHECK-SD-NEXT: mov v0.b[3], w13
+; CHECK-SD-NEXT: csel w12, w12, w8, lt
+; CHECK-SD-NEXT: cmn w12, #128
+; CHECK-SD-NEXT: mov w13, v2.s[1]
+; CHECK-SD-NEXT: csel w12, w12, w9, gt
+; CHECK-SD-NEXT: cmp w11, #127
+; CHECK-SD-NEXT: fmov s3, w12
+; CHECK-SD-NEXT: fcvtzs w12, d4
+; CHECK-SD-NEXT: mov v0.b[4], v2.b[0]
+; CHECK-SD-NEXT: mov d4, v5.d[1]
+; CHECK-SD-NEXT: mov v3.s[1], w10
+; CHECK-SD-NEXT: csel w10, w11, w8, lt
+; CHECK-SD-NEXT: cmn w10, #128
+; CHECK-SD-NEXT: mov v0.b[5], w13
+; CHECK-SD-NEXT: csel w10, w10, w9, gt
+; CHECK-SD-NEXT: cmp w12, #127
+; CHECK-SD-NEXT: fcvtzs w11, d4
+; CHECK-SD-NEXT: csel w12, w12, w8, lt
+; CHECK-SD-NEXT: cmn w12, #128
+; CHECK-SD-NEXT: mov w13, v3.s[1]
+; CHECK-SD-NEXT: csel w12, w12, w9, gt
+; CHECK-SD-NEXT: mov v0.b[6], v3.b[0]
+; CHECK-SD-NEXT: fmov s4, w12
+; CHECK-SD-NEXT: fcvtzs w12, d5
+; CHECK-SD-NEXT: cmp w11, #127
+; CHECK-SD-NEXT: mov d5, v6.d[1]
+; CHECK-SD-NEXT: mov v4.s[1], w10
+; CHECK-SD-NEXT: csel w10, w11, w8, lt
+; CHECK-SD-NEXT: mov v0.b[7], w13
+; CHECK-SD-NEXT: cmn w10, #128
+; CHECK-SD-NEXT: csel w10, w10, w9, gt
+; CHECK-SD-NEXT: cmp w12, #127
+; CHECK-SD-NEXT: fcvtzs w13, d5
+; CHECK-SD-NEXT: csel w11, w12, w8, lt
+; CHECK-SD-NEXT: cmn w11, #128
+; CHECK-SD-NEXT: mov w12, v4.s[1]
+; CHECK-SD-NEXT: mov v0.b[8], v4.b[0]
+; CHECK-SD-NEXT: csel w11, w11, w9, gt
+; CHECK-SD-NEXT: fmov s5, w11
+; CHECK-SD-NEXT: fcvtzs w11, d6
+; CHECK-SD-NEXT: cmp w13, #127
+; CHECK-SD-NEXT: mov d6, v7.d[1]
+; CHECK-SD-NEXT: mov v0.b[9], w12
+; CHECK-SD-NEXT: mov v5.s[1], w10
+; CHECK-SD-NEXT: csel w10, w13, w8, lt
+; CHECK-SD-NEXT: cmn w10, #128
+; CHECK-SD-NEXT: csel w10, w10, w9, gt
+; CHECK-SD-NEXT: cmp w11, #127
+; CHECK-SD-NEXT: fcvtzs w13, d6
+; CHECK-SD-NEXT: csel w11, w11, w8, lt
+; CHECK-SD-NEXT: cmn w11, #128
+; CHECK-SD-NEXT: mov v0.b[10], v5.b[0]
+; CHECK-SD-NEXT: mov w12, v5.s[1]
+; CHECK-SD-NEXT: csel w11, w11, w9, gt
+; CHECK-SD-NEXT: fmov s6, w11
+; CHECK-SD-NEXT: fcvtzs w11, d7
+; CHECK-SD-NEXT: cmp w13, #127
+; CHECK-SD-NEXT: mov v0.b[11], w12
+; CHECK-SD-NEXT: mov v6.s[1], w10
+; CHECK-SD-NEXT: csel w10, w13, w8, lt
+; CHECK-SD-NEXT: cmn w10, #128
+; CHECK-SD-NEXT: csel w10, w10, w9, gt
+; CHECK-SD-NEXT: cmp w11, #127
+; CHECK-SD-NEXT: csel w8, w11, w8, lt
+; CHECK-SD-NEXT: cmn w8, #128
+; CHECK-SD-NEXT: mov v0.b[12], v6.b[0]
+; CHECK-SD-NEXT: mov w11, v6.s[1]
+; CHECK-SD-NEXT: csel w8, w8, w9, gt
+; CHECK-SD-NEXT: fmov s7, w8
+; CHECK-SD-NEXT: mov v0.b[13], w11
+; CHECK-SD-NEXT: mov v7.s[1], w10
+; CHECK-SD-NEXT: mov v0.b[14], v7.b[0]
+; CHECK-SD-NEXT: mov w8, v7.s[1]
+; CHECK-SD-NEXT: mov v0.b[15], w8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_signed_v16f64_v16i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d
+; CHECK-GI-NEXT: fcvtzs v1.2d, v1.2d
+; CHECK-GI-NEXT: adrp x8, .LCPI83_1
+; CHECK-GI-NEXT: fcvtzs v2.2d, v2.2d
+; CHECK-GI-NEXT: fcvtzs v3.2d, v3.2d
+; CHECK-GI-NEXT: ldr q16, [x8, :lo12:.LCPI83_1]
+; CHECK-GI-NEXT: fcvtzs v4.2d, v4.2d
+; CHECK-GI-NEXT: fcvtzs v5.2d, v5.2d
+; CHECK-GI-NEXT: adrp x8, .LCPI83_0
+; CHECK-GI-NEXT: fcvtzs v6.2d, v6.2d
+; CHECK-GI-NEXT: fcvtzs v7.2d, v7.2d
+; CHECK-GI-NEXT: cmgt v17.2d, v16.2d, v0.2d
+; CHECK-GI-NEXT: cmgt v18.2d, v16.2d, v1.2d
+; CHECK-GI-NEXT: cmgt v19.2d, v16.2d, v2.2d
+; CHECK-GI-NEXT: cmgt v20.2d, v16.2d, v3.2d
+; CHECK-GI-NEXT: cmgt v21.2d, v16.2d, v4.2d
+; CHECK-GI-NEXT: cmgt v22.2d, v16.2d, v5.2d
+; CHECK-GI-NEXT: cmgt v23.2d, v16.2d, v6.2d
+; CHECK-GI-NEXT: cmgt v24.2d, v16.2d, v7.2d
+; CHECK-GI-NEXT: bif v0.16b, v16.16b, v17.16b
+; CHECK-GI-NEXT: bif v1.16b, v16.16b, v18.16b
+; CHECK-GI-NEXT: ldr q17, [x8, :lo12:.LCPI83_0]
+; CHECK-GI-NEXT: bif v2.16b, v16.16b, v19.16b
+; CHECK-GI-NEXT: bif v3.16b, v16.16b, v20.16b
+; CHECK-GI-NEXT: bif v4.16b, v16.16b, v21.16b
+; CHECK-GI-NEXT: bif v5.16b, v16.16b, v22.16b
+; CHECK-GI-NEXT: bif v6.16b, v16.16b, v23.16b
+; CHECK-GI-NEXT: bif v7.16b, v16.16b, v24.16b
+; CHECK-GI-NEXT: cmgt v16.2d, v0.2d, v17.2d
+; CHECK-GI-NEXT: cmgt v18.2d, v1.2d, v17.2d
+; CHECK-GI-NEXT: cmgt v19.2d, v2.2d, v17.2d
+; CHECK-GI-NEXT: cmgt v20.2d, v3.2d, v17.2d
+; CHECK-GI-NEXT: cmgt v21.2d, v4.2d, v17.2d
+; CHECK-GI-NEXT: cmgt v22.2d, v5.2d, v17.2d
+; CHECK-GI-NEXT: cmgt v23.2d, v6.2d, v17.2d
+; CHECK-GI-NEXT: cmgt v24.2d, v7.2d, v17.2d
+; CHECK-GI-NEXT: bif v0.16b, v17.16b, v16.16b
+; CHECK-GI-NEXT: bif v1.16b, v17.16b, v18.16b
+; CHECK-GI-NEXT: bif v2.16b, v17.16b, v19.16b
+; CHECK-GI-NEXT: bif v3.16b, v17.16b, v20.16b
+; CHECK-GI-NEXT: bif v4.16b, v17.16b, v21.16b
+; CHECK-GI-NEXT: bif v5.16b, v17.16b, v22.16b
+; CHECK-GI-NEXT: bif v6.16b, v17.16b, v23.16b
+; CHECK-GI-NEXT: bif v7.16b, v17.16b, v24.16b
+; CHECK-GI-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: uzp1 v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: uzp1 v2.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: uzp1 v3.4s, v6.4s, v7.4s
+; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: uzp1 v1.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT: ret
%x = call <16 x i8> @llvm.fptosi.sat.v16f64.v16i8(<16 x double> %f)
ret <16 x i8> %x
}

define <8 x i16> @test_signed_v8f64_v8i16(<8 x double> %f) {
-; CHECK-LABEL: test_signed_v8f64_v8i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov d4, v3.d[1]
-; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: fcvtzs w11, d3
-; CHECK-NEXT: mov d3, v1.d[1]
-; CHECK-NEXT: fcvtzs w13, d2
-; CHECK-NEXT: fcvtzs w15, d1
-; CHECK-NEXT: fcvtzs w17, d0
-; CHECK-NEXT: fcvtzs w9, d4
-; CHECK-NEXT: mov d4, v2.d[1]
-; CHECK-NEXT: mov d2, v0.d[1]
-; CHECK-NEXT: fcvtzs w14, d3
-; CHECK-NEXT: cmp w9, w8
-; CHECK-NEXT: fcvtzs w12, d4
-; CHECK-NEXT: fcvtzs w16, d2
-; CHECK-NEXT: csel w10, w9, w8, lt
-; CHECK-NEXT: mov w9, #-32768 // =0xffff8000
-; CHECK-NEXT: cmn w10, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w10, w10, w9, gt
-; CHECK-NEXT: cmp w11, w8
-; CHECK-NEXT: csel w11, w11, w8, lt
-; CHECK-NEXT: cmn w11, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w11, w11, w9, gt
-; CHECK-NEXT: cmp w12, w8
-; CHECK-NEXT: csel w12, w12, w8, lt
-; CHECK-NEXT: fmov s3, w11
-; CHECK-NEXT: cmn w12, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w12, w12, w9, gt
-; CHECK-NEXT: cmp w13, w8
-; CHECK-NEXT: csel w13, w13, w8, lt
-; CHECK-NEXT: mov v3.s[1], w10
-; CHECK-NEXT: cmn w13, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w13, w13, w9, gt
-; CHECK-NEXT: cmp w14, w8
-; CHECK-NEXT: csel w14, w14, w8, lt
-; CHECK-NEXT: fmov s2, w13
-; CHECK-NEXT: cmn w14, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w14, w14, w9, gt
-; CHECK-NEXT: cmp w15, w8
-; CHECK-NEXT: csel w15, w15, w8, lt
-; CHECK-NEXT: mov v2.s[1], w12
-; CHECK-NEXT: cmn w15, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w15, w15, w9, gt
-; CHECK-NEXT: cmp w16, w8
-; CHECK-NEXT: csel w11, w16, w8, lt
-; CHECK-NEXT: fmov s1, w15
-; CHECK-NEXT: cmn w11, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w10, w11, w9, gt
-; CHECK-NEXT: cmp w17, w8
-; CHECK-NEXT: csel w8, w17, w8, lt
-; CHECK-NEXT: mov v1.s[1], w14
-; CHECK-NEXT: cmn w8, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w8, w8, w9, gt
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: adrp x8, .LCPI84_0
-; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI84_0]
-; CHECK-NEXT: mov v0.s[1], w10
-; CHECK-NEXT: tbl v0.16b, { v0.16b, v1.16b, v2.16b, v3.16b }, v4.16b
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_signed_v8f64_v8i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov d4, v3.d[1]
+; CHECK-SD-NEXT: mov w8, #32767 // =0x7fff
+; CHECK-SD-NEXT: fcvtzs w11, d3
+; CHECK-SD-NEXT: mov d3, v1.d[1]
+; CHECK-SD-NEXT: fcvtzs w13, d2
+; CHECK-SD-NEXT: fcvtzs w15, d1
+; CHECK-SD-NEXT: fcvtzs w17, d0
+; CHECK-SD-NEXT: fcvtzs w9, d4
+; CHECK-SD-NEXT: mov d4, v2.d[1]
+; CHECK-SD-NEXT: mov d2, v0.d[1]
+; CHECK-SD-NEXT: fcvtzs w14, d3
+; CHECK-SD-NEXT: cmp w9, w8
+; CHECK-SD-NEXT: fcvtzs w12, d4
+; CHECK-SD-NEXT: fcvtzs w16, d2
+; CHECK-SD-NEXT: csel w10, w9, w8, lt
+; CHECK-SD-NEXT: mov w9, #-32768 // =0xffff8000
+; CHECK-SD-NEXT: cmn w10, #8, lsl #12 // =32768
+; CHECK-SD-NEXT: csel w10, w10, w9, gt
+; CHECK-SD-NEXT: cmp w11, w8
+; CHECK-SD-NEXT: csel w11, w11, w8, lt
+; CHECK-SD-NEXT: cmn w11, #8, lsl #12 // =32768
+; CHECK-SD-NEXT: csel w11, w11, w9, gt
+; CHECK-SD-NEXT: cmp w12, w8
+; CHECK-SD-NEXT: csel w12, w12, w8, lt
+; CHECK-SD-NEXT: fmov s3, w11
+; CHECK-SD-NEXT: cmn w12, #8, lsl #12 // =32768
+; CHECK-SD-NEXT: csel w12, w12, w9, gt
+; CHECK-SD-NEXT: cmp w13, w8
+; CHECK-SD-NEXT: csel w13, w13, w8, lt
+; CHECK-SD-NEXT: mov v3.s[1], w10
+; CHECK-SD-NEXT: cmn w13, #8, lsl #12 // =32768
+; CHECK-SD-NEXT: csel w13, w13, w9, gt
+; CHECK-SD-NEXT: cmp w14, w8
+; CHECK-SD-NEXT: csel w14, w14, w8, lt
+; CHECK-SD-NEXT: fmov s2, w13
+; CHECK-SD-NEXT: cmn w14, #8, lsl #12 // =32768
+; CHECK-SD-NEXT: csel w14, w14, w9, gt
+; CHECK-SD-NEXT: cmp w15, w8
+; CHECK-SD-NEXT: csel w15, w15, w8, lt
+; CHECK-SD-NEXT: mov v2.s[1], w12
+; CHECK-SD-NEXT: cmn w15, #8, lsl #12 // =32768
+; CHECK-SD-NEXT: csel w15, w15, w9, gt
+; CHECK-SD-NEXT: cmp w16, w8
+; CHECK-SD-NEXT: csel w11, w16, w8, lt
+; CHECK-SD-NEXT: fmov s1, w15
+; CHECK-SD-NEXT: cmn w11, #8, lsl #12 // =32768
+; CHECK-SD-NEXT: csel w10, w11, w9, gt
+; CHECK-SD-NEXT: cmp w17, w8
+; CHECK-SD-NEXT: csel w8, w17, w8, lt
+; CHECK-SD-NEXT: mov v1.s[1], w14
+; CHECK-SD-NEXT: cmn w8, #8, lsl #12 // =32768
+; CHECK-SD-NEXT: csel w8, w8, w9, gt
+; CHECK-SD-NEXT: fmov s0, w8
+; CHECK-SD-NEXT: adrp x8, .LCPI84_0
+; CHECK-SD-NEXT: ldr q4, [x8, :lo12:.LCPI84_0]
+; CHECK-SD-NEXT: mov v0.s[1], w10
+; CHECK-SD-NEXT: tbl v0.16b, { v0.16b, v1.16b, v2.16b, v3.16b }, v4.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_signed_v8f64_v8i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: fcvtzs v0.2d, v0.2d
+; CHECK-GI-NEXT: fcvtzs v1.2d, v1.2d
+; CHECK-GI-NEXT: adrp x8, .LCPI84_1
+; CHECK-GI-NEXT: fcvtzs v2.2d, v2.2d
+; CHECK-GI-NEXT: fcvtzs v3.2d, v3.2d
+; CHECK-GI-NEXT: ldr q4, [x8, :lo12:.LCPI84_1]
+; CHECK-GI-NEXT: adrp x8, .LCPI84_0
+; CHECK-GI-NEXT: cmgt v5.2d, v4.2d, v0.2d
+; CHECK-GI-NEXT: cmgt v6.2d, v4.2d, v1.2d
+; CHECK-GI-NEXT: cmgt v7.2d, v4.2d, v2.2d
+; CHECK-GI-NEXT: cmgt v16.2d, v4.2d, v3.2d
+; CHECK-GI-NEXT: bif v0.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT: bif v1.16b, v4.16b, v6.16b
+; CHECK-GI-NEXT: bif v2.16b, v4.16b, v7.16b
+; CHECK-GI-NEXT: bif v3.16b, v4.16b, v16.16b
+; CHECK-GI-NEXT: ldr q4, [x8, :lo12:.LCPI84_0]
+; CHECK-GI-NEXT: cmgt v5.2d, v0.2d, v4.2d
+; CHECK-GI-NEXT: cmgt v6.2d, v1.2d, v4.2d
+; CHECK-GI-NEXT: cmgt v7.2d, v2.2d, v4.2d
+; CHECK-GI-NEXT: cmgt v16.2d, v3.2d, v4.2d
+; CHECK-GI-NEXT: bif v0.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT: bif v1.16b, v4.16b, v6.16b
+; CHECK-GI-NEXT: bif v2.16b, v4.16b, v7.16b
+; CHECK-GI-NEXT: bif v3.16b, v4.16b, v16.16b
+; CHECK-GI-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: uzp1 v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: ret
%x = call <8 x i16> @llvm.fptosi.sat.v8f64.v8i16(<8 x double> %f)
ret <8 x i16> %x
}

define <16 x i16> @test_signed_v16f64_v16i16(<16 x double> %f) {
-; CHECK-LABEL: test_signed_v16f64_v16i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov d16, v3.d[1]
-; CHECK-NEXT: mov w9, #32767 // =0x7fff
-; CHECK-NEXT: fcvtzs w11, d3
-; CHECK-NEXT: mov d3, v1.d[1]
-; CHECK-NEXT: fcvtzs w14, d2
-; CHECK-NEXT: fcvtzs w15, d1
-; CHECK-NEXT: mov d1, v7.d[1]
-; CHECK-NEXT: fcvtzs w18, d0
-; CHECK-NEXT: fcvtzs w1, d7
-; CHECK-NEXT: fcvtzs w2, d6
-; CHECK-NEXT: fcvtzs w4, d5
-; CHECK-NEXT: fcvtzs w6, d4
-; CHECK-NEXT: fcvtzs w8, d16
-; CHECK-NEXT: mov d16, v2.d[1]
-; CHECK-NEXT: mov d2, v0.d[1]
-; CHECK-NEXT: mov d0, v6.d[1]
-; CHECK-NEXT: fcvtzs w0, d1
-; CHECK-NEXT: cmp w8, w9
-; CHECK-NEXT: fcvtzs w13, d16
-; CHECK-NEXT: fcvtzs w17, d2
-; CHECK-NEXT: csel w10, w8, w9, lt
-; CHECK-NEXT: mov w8, #-32768 // =0xffff8000
-; CHECK-NEXT: cmn w10, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w10, w10, w8, gt
-; CHECK-NEXT: cmp w11, w9
-; CHECK-NEXT: csel w11, w11, w9, lt
-; CHECK-NEXT: cmn w11, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w12, w11, w8, gt
-; CHECK-NEXT: cmp w13, w9
-; CHECK-NEXT: csel w11, w13, w9, lt
-; CHECK-NEXT: fcvtzs w13, d3
-; CHECK-NEXT: cmn w11, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w11, w11, w8, gt
-; CHECK-NEXT: cmp w14, w9
-; CHECK-NEXT: csel w14, w14, w9, lt
-; CHECK-NEXT: cmn w14, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w14, w14, w8, gt
-; CHECK-NEXT: cmp w13, w9
-; CHECK-NEXT: csel w13, w13, w9, lt
-; CHECK-NEXT: cmn w13, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w13, w13, w8, gt
-; CHECK-NEXT: cmp w15, w9
-; CHECK-NEXT: csel w15, w15, w9, lt
-; CHECK-NEXT: cmn w15, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w16, w15, w8, gt
-; CHECK-NEXT: cmp w17, w9
-; CHECK-NEXT: csel w15, w17, w9, lt
-; CHECK-NEXT: cmn w15, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w15, w15, w8, gt
-; CHECK-NEXT: cmp w18, w9
-; CHECK-NEXT: csel w17, w18, w9, lt
-; CHECK-NEXT: cmn w17, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w17, w17, w8, gt
-; CHECK-NEXT: cmp w0, w9
-; CHECK-NEXT: csel w18, w0, w9, lt
-; CHECK-NEXT: fcvtzs w0, d0
-; CHECK-NEXT: mov d0, v5.d[1]
-; CHECK-NEXT: cmn w18, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w18, w18, w8, gt
-; CHECK-NEXT: cmp w1, w9
-; CHECK-NEXT: csel w1, w1, w9, lt
-; CHECK-NEXT: cmn w1, #8, lsl #12 // =32768
-; CHECK-NEXT: fcvtzs w3, d0
-; CHECK-NEXT: mov d0, v4.d[1]
-; CHECK-NEXT: csel w1, w1, w8, gt
-; CHECK-NEXT: cmp w0, w9
-; CHECK-NEXT: csel w0, w0, w9, lt
-; CHECK-NEXT: fmov s7, w1
-; CHECK-NEXT: cmn w0, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w0, w0, w8, gt
-; CHECK-NEXT: cmp w2, w9
-; CHECK-NEXT: fcvtzs w5, d0
-; CHECK-NEXT: csel w2, w2, w9, lt
-; CHECK-NEXT: fmov s3, w12
-; CHECK-NEXT: mov v7.s[1], w18
-; CHECK-NEXT: cmn w2, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w2, w2, w8, gt
-; CHECK-NEXT: cmp w3, w9
-; CHECK-NEXT: csel w3, w3, w9, lt
-; CHECK-NEXT: mov v3.s[1], w10
-; CHECK-NEXT: fmov s6, w2
-; CHECK-NEXT: cmn w3, #8, lsl #12 // =32768
-; CHECK-NEXT: fmov s2, w14
-; CHECK-NEXT: csel w3, w3, w8, gt
-; CHECK-NEXT: cmp w4, w9
-; CHECK-NEXT: csel w4, w4, w9, lt
-; CHECK-NEXT: mov v6.s[1], w0
-; CHECK-NEXT: cmn w4, #8, lsl #12 // =32768
-; CHECK-NEXT: mov v2.s[1], w11
-; CHECK-NEXT: csel w12, w4, w8, gt
-; CHECK-NEXT: cmp w5, w9
-; CHECK-NEXT: fmov s1, w16
-; CHECK-NEXT: csel w10, w5, w9, lt
-; CHECK-NEXT: fmov s5, w12
-; CHECK-NEXT: cmn w10, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w10, w10, w8, gt
-; CHECK-NEXT: cmp w6, w9
-; CHECK-NEXT: mov v1.s[1], w13
-; CHECK-NEXT: csel w9, w6, w9, lt
-; CHECK-NEXT: mov v5.s[1], w3
-; CHECK-NEXT: fmov s0, w17
-; CHECK-NEXT: cmn w9, #8, lsl #12 // =32768
-; CHECK-NEXT: csel w8, w9, w8, gt
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: mov v0.s[1], w15
-; CHECK-NEXT: adrp x8, .LCPI85_0
-; CHECK-NEXT: ldr q16, [x8, :lo12:.LCPI85_0]
-; CHECK-NEXT: mov v4.s[1], w10
-; CHECK-NEXT: tbl v0.16b, { v0.16b, v1.16b, v2.16b, v3.16b }, v16.16b
-; CHECK-NEXT: tbl v1.16b, { v4.16b, v5.16b, v6.16b, v7.16b }, v16.16b
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_signed_v16f64_v16i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov d16, v3.d[1]
+; CHECK-SD-NEXT: mov w9, #32767 // =0x7fff
+; CHECK-SD-NEXT: fcvtzs w11, d3
+; CHECK-SD-NEXT: mov d3, v1.d[1]
+; CHECK-SD-NEXT: fcvtzs w14, d2
+; CHECK-SD-NEXT: fcvtzs w15, d1
+; CHECK-SD-NEXT: mov d1, v7.d[1]
+; CHECK-SD-NEXT: fcvtzs w18, d0
+; CHECK-SD-NEXT: fcvtzs w1, d7
+; CHECK-SD-NEXT: fcvtzs w2, d6
+; CHECK-SD-NEXT: fcvtzs w4, d5
+; CHECK-SD-NEXT: fcvtzs w6, d4
+; CHECK-SD-NEXT: fcvtzs w8, d16
+; CHECK-SD-NEXT: mov d16, v2.d[1]
+; CHECK-SD-NEXT: mov d2, v0.d[1]
+; CHECK-SD-NEXT: mov d0, v6.d[1]
+; CHECK-SD-NEXT: fcvtzs w0, d1
+; CHECK-SD-NEXT: cmp w8, w9
+; CHECK-SD-NEXT: fcvtzs w13, d16
+; CHECK-SD-NEXT: fcvtzs w17, d2
+; CHECK-SD-NEXT: csel w10, w8, w9, lt
+; CHECK-SD-NEXT: mov w8, #-32768 // =0xffff8000
+; CHECK-SD-NEXT: cmn w10, #8, lsl #12 // =32768
+; CHECK-SD-NEXT: csel w10, w10, w8, gt
+; CHECK-SD-NEXT: cmp w11, w9
+; CHECK-SD-NEXT: csel w11, w11, w9, lt
+; CHECK-SD-NEXT: cmn w11, #8, lsl #12 // =32768
+; CHECK-SD-NEXT: csel w12, w11, w8, gt
+; CHECK-SD-NEXT: cmp w13, w9
+; CHECK-SD-NEXT: csel w11, w13, w9, lt
+; CHECK-SD-NEXT: fcvtzs w13, d3
+; CHECK-SD-NEXT: cmn w11, #8, lsl #12 // =32768
+; CHECK-SD-NEXT: csel w11, w11, w8, gt
+; CHECK-SD-NEXT: cmp w14, w9
+; CHECK-SD-NEXT: csel w14, w14, w9, lt
+; CHECK-SD-NEXT: cmn w14, #8, lsl #12 // =32768
+; CHECK-SD-NEXT: csel w14, w14, w8, gt
+; CHECK-SD-NEXT: cmp w13, w9
+; CHECK-SD-NEXT: csel w13, w13, w9, lt
+; CHECK-SD-NEXT: cmn w13, #8, lsl #12 // =32768
+; CHECK-SD-NEXT: csel w13, w13, w8, gt
+; CHECK-SD-NEXT: cmp w15, w9
+; CHECK-SD-NEXT: csel w15, w15, w9, lt
+; CHECK-SD-NEXT: cmn w15, #8, lsl #12 // =32768
-; CHECK-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
-; CHECK-NEXT: fmov s8, s0
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: mov w8, #1904214015 // =0x717fffff
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: mov x10, #68719476735 // =0xfffffffff
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
-; CHECK-NEXT: csel x8, xzr, x0, lt
-; CHECK-NEXT: csel x9, xzr, x1, lt
-; CHECK-NEXT: fcmp s8, s0
-; CHECK-NEXT: csel x1, x10, x9, gt
-; CHECK-NEXT: csinv x0, x8, xzr, le
-; CHECK-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_unsigned_i100_f32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SD-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
+; CHECK-SD-NEXT: fmov s8, s0
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: mov w8, #1904214015 // =0x717fffff
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: mov x10, #68719476735 // =0xfffffffff
+; CHECK-SD-NEXT: fmov s0, w8
+; CHECK-SD-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
+; CHECK-SD-NEXT: csel x8, xzr, x0, lt
+; CHECK-SD-NEXT: csel x9, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp s8, s0
+; CHECK-SD-NEXT: csel x1, x10, x9, gt
+; CHECK-SD-NEXT: csinv x0, x8, xzr, le
+; CHECK-SD-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_unsigned_i100_f32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
+; CHECK-GI-NEXT: fmov s8, s0
+; CHECK-GI-NEXT: bl __fixunssfti
+; CHECK-GI-NEXT: mov w8, #1904214015 // =0x717fffff
+; CHECK-GI-NEXT: fcmp s8, #0.0
+; CHECK-GI-NEXT: mov x10, #68719476735 // =0xfffffffff
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
+; CHECK-GI-NEXT: csel x8, xzr, x0, lt
+; CHECK-GI-NEXT: csel x9, xzr, x1, lt
+; CHECK-GI-NEXT: fcmp s8, s0
+; CHECK-GI-NEXT: csinv x0, x8, xzr, le
+; CHECK-GI-NEXT: csel x1, x10, x9, gt
+; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT: ret
%x = call i100 @llvm.fptoui.sat.i100.f32(float %f)
ret i100 %x
}

define i128 @test_unsigned_i128_f32(float %f) nounwind {
-; CHECK-LABEL: test_unsigned_i128_f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
-; CHECK-NEXT: fmov s8, s0
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: mov w8, #2139095039 // =0x7f7fffff
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: csel x8, xzr, x1, lt
-; CHECK-NEXT: csel x9, xzr, x0, lt
-; CHECK-NEXT: fcmp s8, s0
-; CHECK-NEXT: csinv x0, x9, xzr, le
-; CHECK-NEXT: csinv x1, x8, xzr, le
-; CHECK-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_unsigned_i128_f32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SD-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
+; CHECK-SD-NEXT: fmov s8, s0
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: mov w8, #2139095039 // =0x7f7fffff
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
+; CHECK-SD-NEXT: fmov s0, w8
+; CHECK-SD-NEXT: csel x8, xzr, x1, lt
+; CHECK-SD-NEXT: csel x9, xzr, x0, lt
+; CHECK-SD-NEXT: fcmp s8, s0
+; CHECK-SD-NEXT: csinv x0, x9, xzr, le
+; CHECK-SD-NEXT: csinv x1, x8, xzr, le
+; CHECK-SD-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_unsigned_i128_f32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
+; CHECK-GI-NEXT: fmov s8, s0
+; CHECK-GI-NEXT: bl __fixunssfti
+; CHECK-GI-NEXT: mov w8, #2139095039 // =0x7f7fffff
+; CHECK-GI-NEXT: fcmp s8, #0.0
+; CHECK-GI-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: csel x8, xzr, x0, lt
+; CHECK-GI-NEXT: csel x9, xzr, x1, lt
+; CHECK-GI-NEXT: fcmp s8, s0
+; CHECK-GI-NEXT: csinv x0, x8, xzr, le
+; CHECK-GI-NEXT: csinv x1, x9, xzr, le
+; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT: ret
%x = call i128 @llvm.fptoui.sat.i128.f32(float %f)
ret i128 %x
}
@@ -167,12 +214,20 @@ declare i100 @llvm.fptoui.sat.i100.f64(double)
declare i128 @llvm.fptoui.sat.i128.f64(double)

define i1 @test_unsigned_i1_f64(double %f) nounwind {
-; CHECK-LABEL: test_unsigned_i1_f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: fcvtzu w8, d0
-; CHECK-NEXT: cmp w8, #1
-; CHECK-NEXT: csinc w0, w8, wzr, lo
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_unsigned_i1_f64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: fcvtzu w8, d0
+; CHECK-SD-NEXT: cmp w8, #1
+; CHECK-SD-NEXT: csinc w0, w8, wzr, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_unsigned_i1_f64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: fcvtzu w8, d0
+; CHECK-GI-NEXT: cmp w8, #1
+; CHECK-GI-NEXT: csinc w8, w8, wzr, lo
+; CHECK-GI-NEXT: and w0, w8, #0x1
+; CHECK-GI-NEXT: ret
%x = call i1 @llvm.fptoui.sat.i1.f64(double %f)
ret i1 %x
}
@@ -256,46 +311,83 @@ define i64 @test_unsigned_i64_f64(double %f) nounwind {
}

define i100 @test_unsigned_i100_f64(double %f) nounwind {
-; CHECK-LABEL: test_unsigned_i100_f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
-; CHECK-NEXT: fmov d8, d0
-; CHECK-NEXT: bl __fixunsdfti
-; CHECK-NEXT: mov x8, #5057542381537067007 // =0x462fffffffffffff
-; CHECK-NEXT: fcmp d8, #0.0
-; CHECK-NEXT: mov x10, #68719476735 // =0xfffffffff
-; CHECK-NEXT: fmov d0, x8
-; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
-; CHECK-NEXT: csel x8, xzr, x0, lt
-; CHECK-NEXT: csel x9, xzr, x1, lt
-; CHECK-NEXT: fcmp d8, d0
-; CHECK-NEXT: csel x1, x10, x9, gt
-; CHECK-NEXT: csinv x0, x8, xzr, le
-; CHECK-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_unsigned_i100_f64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SD-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
+; CHECK-SD-NEXT: fmov d8, d0
+; CHECK-SD-NEXT: bl __fixunsdfti
+; CHECK-SD-NEXT: mov x8, #5057542381537067007 // =0x462fffffffffffff
+; CHECK-SD-NEXT: fcmp d8, #0.0
+; CHECK-SD-NEXT: mov x10, #68719476735 // =0xfffffffff
+; CHECK-SD-NEXT: fmov d0, x8
+; CHECK-SD-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
+; CHECK-SD-NEXT: csel x8, xzr, x0, lt
+; CHECK-SD-NEXT: csel x9, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp d8, d0
+; CHECK-SD-NEXT: csel x1, x10, x9, gt
+; CHECK-SD-NEXT: csinv x0, x8, xzr, le
+; CHECK-SD-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_unsigned_i100_f64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
+; CHECK-GI-NEXT: fmov d8, d0
+; CHECK-GI-NEXT: bl __fixunsdfti
+; CHECK-GI-NEXT: mov x8, #5057542381537067007 // =0x462fffffffffffff
+; CHECK-GI-NEXT: fcmp d8, #0.0
+; CHECK-GI-NEXT: mov x10, #68719476735 // =0xfffffffff
+; CHECK-GI-NEXT: fmov d0, x8
+; CHECK-GI-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
+; CHECK-GI-NEXT: csel x8, xzr, x0, lt
+; CHECK-GI-NEXT: csel x9, xzr, x1, lt
+; CHECK-GI-NEXT: fcmp d8, d0
+; CHECK-GI-NEXT: csinv x0, x8, xzr, le
+; CHECK-GI-NEXT: csel x1, x10, x9, gt
+; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT: ret
%x = call i100 @llvm.fptoui.sat.i100.f64(double %f)
ret i100 %x
}

define i128 @test_unsigned_i128_f64(double %f) nounwind {
-; CHECK-LABEL: test_unsigned_i128_f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
-; CHECK-NEXT: fmov d8, d0
-; CHECK-NEXT: bl __fixunsdfti
-; CHECK-NEXT: mov x8, #5183643171103440895 // =0x47efffffffffffff
-; CHECK-NEXT: fcmp d8, #0.0
-; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
-; CHECK-NEXT: fmov d0, x8
-; CHECK-NEXT: csel x8, xzr, x1, lt
-; CHECK-NEXT: csel x9, xzr, x0, lt
-; CHECK-NEXT: fcmp d8, d0
-; CHECK-NEXT: csinv x0, x9, xzr, le
-; CHECK-NEXT: csinv x1, x8, xzr, le
-; CHECK-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_unsigned_i128_f64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SD-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
+; CHECK-SD-NEXT: fmov d8, d0
+; CHECK-SD-NEXT: bl __fixunsdfti
+; CHECK-SD-NEXT: mov x8, #5183643171103440895 // =0x47efffffffffffff
+; CHECK-SD-NEXT: fcmp d8, #0.0
+; CHECK-SD-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
+; CHECK-SD-NEXT: fmov d0, x8
+; CHECK-SD-NEXT: csel x8, xzr, x1, lt
+; CHECK-SD-NEXT: csel x9, xzr, x0, lt
+; CHECK-SD-NEXT: fcmp d8, d0
+; CHECK-SD-NEXT: csinv x0, x9, xzr, le
+; CHECK-SD-NEXT: csinv x1, x8, xzr, le
+; CHECK-SD-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_unsigned_i128_f64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
+; CHECK-GI-NEXT: fmov d8, d0
+; CHECK-GI-NEXT: bl __fixunsdfti
+; CHECK-GI-NEXT: mov x8, #5183643171103440895 // =0x47efffffffffffff
+; CHECK-GI-NEXT: fcmp d8, #0.0
+; CHECK-GI-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
+; CHECK-GI-NEXT: fmov d0, x8
+; CHECK-GI-NEXT: csel x8, xzr, x0, lt
+; CHECK-GI-NEXT: csel x9, xzr, x1, lt
+; CHECK-GI-NEXT: fcmp d8, d0
+; CHECK-GI-NEXT: csinv x0, x8, xzr, le
+; CHECK-GI-NEXT: csinv x1, x9, xzr, le
+; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT: ret
%x = call i128 @llvm.fptoui.sat.i128.f64(double %f)
ret i128 %x
}
@@ -316,202 +408,423 @@ declare i100 @llvm.fptoui.sat.i100.f16(half)
declare i128 @llvm.fptoui.sat.i128.f16(half)

define i1 @test_unsigned_i1_f16(half %f) nounwind {
-; CHECK-CVT-LABEL: test_unsigned_i1_f16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvt s0, h0
-; CHECK-CVT-NEXT: fcvtzu w8, s0
-; CHECK-CVT-NEXT: cmp w8, #1
-; CHECK-CVT-NEXT: csinc w0, w8, wzr, lo
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_i1_f16:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzu w8, h0
-; CHECK-FP16-NEXT: cmp w8, #1
-; CHECK-FP16-NEXT: csinc w0, w8, wzr, lo
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_i1_f16:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvt s0, h0
+; CHECK-SD-CVT-NEXT: fcvtzu w8, s0
+; CHECK-SD-CVT-NEXT: cmp w8, #1
+; CHECK-SD-CVT-NEXT: csinc w0, w8, wzr, lo
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_i1_f16:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: fcvtzu w8, h0
+; CHECK-SD-FP16-NEXT: cmp w8, #1
+; CHECK-SD-FP16-NEXT: csinc w0, w8, wzr, lo
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_i1_f16:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvt s0, h0
+; CHECK-GI-CVT-NEXT: fcvtzu w8, s0
+; CHECK-GI-CVT-NEXT: cmp w8, #1
+; CHECK-GI-CVT-NEXT: csinc w8, w8, wzr, lo
+; CHECK-GI-CVT-NEXT: and w0, w8, #0x1
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_i1_f16:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzu w8, h0
+; CHECK-GI-FP16-NEXT: cmp w8, #1
+; CHECK-GI-FP16-NEXT: csinc w8, w8, wzr, lo
+; CHECK-GI-FP16-NEXT: and w0, w8, #0x1
+; CHECK-GI-FP16-NEXT: ret
%x = call i1 @llvm.fptoui.sat.i1.f16(half %f)
ret i1 %x
}

define i8 @test_unsigned_i8_f16(half %f) nounwind {
-; CHECK-CVT-LABEL: test_unsigned_i8_f16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvt s0, h0
-; CHECK-CVT-NEXT: mov w8, #255 // =0xff
-; CHECK-CVT-NEXT: fcvtzu w9, s0
-; CHECK-CVT-NEXT: cmp w9, #255
-; CHECK-CVT-NEXT: csel w0, w9, w8, lo
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_i8_f16:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzu w9, h0
-; CHECK-FP16-NEXT: mov w8, #255 // =0xff
-; CHECK-FP16-NEXT: cmp w9, #255
-; CHECK-FP16-NEXT: csel w0, w9, w8, lo
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_i8_f16:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvt s0, h0
+; CHECK-SD-CVT-NEXT: mov w8, #255 // =0xff
+; CHECK-SD-CVT-NEXT: fcvtzu w9, s0
+; CHECK-SD-CVT-NEXT: cmp w9, #255
+; CHECK-SD-CVT-NEXT: csel w0, w9, w8, lo
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_i8_f16:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: fcvtzu w9, h0
+; CHECK-SD-FP16-NEXT: mov w8, #255 // =0xff
+; CHECK-SD-FP16-NEXT: cmp w9, #255
+; CHECK-SD-FP16-NEXT: csel w0, w9, w8, lo
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_i8_f16:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvt s0, h0
+; CHECK-GI-CVT-NEXT: mov w8, #255 // =0xff
+; CHECK-GI-CVT-NEXT: fcvtzu w9, s0
+; CHECK-GI-CVT-NEXT: cmp w9, #255
+; CHECK-GI-CVT-NEXT: csel w0, w9, w8, lo
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_i8_f16:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzu w9, h0
+; CHECK-GI-FP16-NEXT: mov w8, #255 // =0xff
+; CHECK-GI-FP16-NEXT: cmp w9, #255
+; CHECK-GI-FP16-NEXT: csel w0, w9, w8, lo
+; CHECK-GI-FP16-NEXT: ret
%x = call i8 @llvm.fptoui.sat.i8.f16(half %f)
ret i8 %x
}

define i13 @test_unsigned_i13_f16(half %f) nounwind {
-; CHECK-CVT-LABEL: test_unsigned_i13_f16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvt s0, h0
-; CHECK-CVT-NEXT: mov w9, #8191 // =0x1fff
-; CHECK-CVT-NEXT: fcvtzu w8, s0
-; CHECK-CVT-NEXT: cmp w8, w9
-; CHECK-CVT-NEXT: csel w0, w8, w9, lo
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_i13_f16:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzu w8, h0
-; CHECK-FP16-NEXT: mov w9, #8191 // =0x1fff
-; CHECK-FP16-NEXT: cmp w8, w9
-; CHECK-FP16-NEXT: csel w0, w8, w9, lo
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_i13_f16:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvt s0, h0
+; CHECK-SD-CVT-NEXT: mov w9, #8191 // =0x1fff
+; CHECK-SD-CVT-NEXT: fcvtzu w8, s0
+; CHECK-SD-CVT-NEXT: cmp w8, w9
+; CHECK-SD-CVT-NEXT: csel w0, w8, w9, lo
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_i13_f16:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: fcvtzu w8, h0
+; CHECK-SD-FP16-NEXT: mov w9, #8191 // =0x1fff
+; CHECK-SD-FP16-NEXT: cmp w8, w9
+; CHECK-SD-FP16-NEXT: csel w0, w8, w9, lo
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_i13_f16:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvt s0, h0
+; CHECK-GI-CVT-NEXT: mov w9, #8191 // =0x1fff
+; CHECK-GI-CVT-NEXT: fcvtzu w8, s0
+; CHECK-GI-CVT-NEXT: cmp w8, w9
+; CHECK-GI-CVT-NEXT: csel w0, w8, w9, lo
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_i13_f16:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzu w8, h0
+; CHECK-GI-FP16-NEXT: mov w9, #8191 // =0x1fff
+; CHECK-GI-FP16-NEXT: cmp w8, w9
+; CHECK-GI-FP16-NEXT: csel w0, w8, w9, lo
+; CHECK-GI-FP16-NEXT: ret
%x = call i13 @llvm.fptoui.sat.i13.f16(half %f)
ret i13 %x
}

define i16 @test_unsigned_i16_f16(half %f) nounwind {
-; CHECK-CVT-LABEL: test_unsigned_i16_f16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvt s0, h0
-; CHECK-CVT-NEXT: mov w9, #65535 // =0xffff
-; CHECK-CVT-NEXT: fcvtzu w8, s0
-; CHECK-CVT-NEXT: cmp w8, w9
-; CHECK-CVT-NEXT: csel w0, w8, w9, lo
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_i16_f16:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzu w8, h0
-; CHECK-FP16-NEXT: mov w9, #65535 // =0xffff
-; CHECK-FP16-NEXT: cmp w8, w9
-; CHECK-FP16-NEXT: csel w0, w8, w9, lo
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_i16_f16:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvt s0, h0
+; CHECK-SD-CVT-NEXT: mov w9, #65535 // =0xffff
+; CHECK-SD-CVT-NEXT: fcvtzu w8, s0
+; CHECK-SD-CVT-NEXT: cmp w8, w9
+; CHECK-SD-CVT-NEXT: csel w0, w8, w9, lo
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_i16_f16:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: fcvtzu w8, h0
+; CHECK-SD-FP16-NEXT: mov w9, #65535 // =0xffff
+; CHECK-SD-FP16-NEXT: cmp w8, w9
+; CHECK-SD-FP16-NEXT: csel w0, w8, w9, lo
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_i16_f16:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvt s0, h0
+; CHECK-GI-CVT-NEXT: mov w9, #65535 // =0xffff
+; CHECK-GI-CVT-NEXT: fcvtzu w8, s0
+; CHECK-GI-CVT-NEXT: cmp w8, w9
+; CHECK-GI-CVT-NEXT: csel w0, w8, w9, lo
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_i16_f16:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzu w8, h0
+; CHECK-GI-FP16-NEXT: mov w9, #65535 // =0xffff
+; CHECK-GI-FP16-NEXT: cmp w8, w9
+; CHECK-GI-FP16-NEXT: csel w0, w8, w9, lo
+; CHECK-GI-FP16-NEXT: ret
%x = call i16 @llvm.fptoui.sat.i16.f16(half %f)
ret i16 %x
}

define i19 @test_unsigned_i19_f16(half %f) nounwind {
-; CHECK-CVT-LABEL: test_unsigned_i19_f16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvt s0, h0
-; CHECK-CVT-NEXT: mov w9, #524287 // =0x7ffff
-; CHECK-CVT-NEXT: fcvtzu w8, s0
-; CHECK-CVT-NEXT: cmp w8, w9
-; CHECK-CVT-NEXT: csel w0, w8, w9, lo
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_i19_f16:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzu w8, h0
-; CHECK-FP16-NEXT: mov w9, #524287 // =0x7ffff
-; CHECK-FP16-NEXT: cmp w8, w9
-; CHECK-FP16-NEXT: csel w0, w8, w9, lo
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_i19_f16:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvt s0, h0
+; CHECK-SD-CVT-NEXT: mov w9, #524287 // =0x7ffff
+; CHECK-SD-CVT-NEXT: fcvtzu w8, s0
+; CHECK-SD-CVT-NEXT: cmp w8, w9
+; CHECK-SD-CVT-NEXT: csel w0, w8, w9, lo
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_i19_f16:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: fcvtzu w8, h0
+; CHECK-SD-FP16-NEXT: mov w9, #524287 // =0x7ffff
+; CHECK-SD-FP16-NEXT: cmp w8, w9
+; CHECK-SD-FP16-NEXT: csel w0, w8, w9, lo
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_i19_f16:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvt s0, h0
+; CHECK-GI-CVT-NEXT: mov w9, #524287 // =0x7ffff
+; CHECK-GI-CVT-NEXT: fcvtzu w8, s0
+; CHECK-GI-CVT-NEXT: cmp w8, w9
+; CHECK-GI-CVT-NEXT: csel w0, w8, w9, lo
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_i19_f16:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzu w8, h0
+; CHECK-GI-FP16-NEXT: mov w9, #524287 // =0x7ffff
+; CHECK-GI-FP16-NEXT: cmp w8, w9
+; CHECK-GI-FP16-NEXT: csel w0, w8, w9, lo
+; CHECK-GI-FP16-NEXT: ret
%x = call i19 @llvm.fptoui.sat.i19.f16(half %f)
ret i19 %x
}

define i32 @test_unsigned_i32_f16(half %f) nounwind {
-; CHECK-CVT-LABEL: test_unsigned_i32_f16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvt s0, h0
-; CHECK-CVT-NEXT: fcvtzu w0, s0
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_i32_f16:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzu w0, h0
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_i32_f16:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvt s0, h0
+; CHECK-SD-CVT-NEXT: fcvtzu w0, s0
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_i32_f16:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: fcvtzu w0, h0
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_i32_f16:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvt s0, h0
+; CHECK-GI-CVT-NEXT: fcvtzu w0, s0
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_i32_f16:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzu w0, h0
+; CHECK-GI-FP16-NEXT: ret
%x = call i32 @llvm.fptoui.sat.i32.f16(half %f)
ret i32 %x
}

define i50 @test_unsigned_i50_f16(half %f) nounwind {
-; CHECK-CVT-LABEL: test_unsigned_i50_f16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvt s0, h0
-; CHECK-CVT-NEXT: mov x9, #1125899906842623 // =0x3ffffffffffff
-; CHECK-CVT-NEXT: fcvtzu x8, s0
-; CHECK-CVT-NEXT: cmp x8, x9
-; CHECK-CVT-NEXT: csel x0, x8, x9, lo
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_i50_f16:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzu x8, h0
-; CHECK-FP16-NEXT: mov x9, #1125899906842623 // =0x3ffffffffffff
-; CHECK-FP16-NEXT: cmp x8, x9
-; CHECK-FP16-NEXT: csel x0, x8, x9, lo
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_i50_f16:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvt s0, h0
+; CHECK-SD-CVT-NEXT: mov x9, #1125899906842623 // =0x3ffffffffffff
+; CHECK-SD-CVT-NEXT: fcvtzu x8, s0
+; CHECK-SD-CVT-NEXT: cmp x8, x9
+; CHECK-SD-CVT-NEXT: csel x0, x8, x9, lo
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_i50_f16:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: fcvtzu x8, h0
+; CHECK-SD-FP16-NEXT: mov x9, #1125899906842623 // =0x3ffffffffffff
+; CHECK-SD-FP16-NEXT: cmp x8, x9
+; CHECK-SD-FP16-NEXT: csel x0, x8, x9, lo
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_i50_f16:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvt s0, h0
+; CHECK-GI-CVT-NEXT: mov x9, #1125899906842623 // =0x3ffffffffffff
+; CHECK-GI-CVT-NEXT: fcvtzu x8, s0
+; CHECK-GI-CVT-NEXT: cmp x8, x9
+; CHECK-GI-CVT-NEXT: csel x0, x8, x9, lo
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_i50_f16:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzu x8, h0
+; CHECK-GI-FP16-NEXT: mov x9, #1125899906842623 // =0x3ffffffffffff
+; CHECK-GI-FP16-NEXT: cmp x8, x9
+; CHECK-GI-FP16-NEXT: csel x0, x8, x9, lo
+; CHECK-GI-FP16-NEXT: ret
%x = call i50 @llvm.fptoui.sat.i50.f16(half %f)
ret i50 %x
}

define i64 @test_unsigned_i64_f16(half %f) nounwind {
-; CHECK-CVT-LABEL: test_unsigned_i64_f16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvt s0, h0
-; CHECK-CVT-NEXT: fcvtzu x0, s0
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_i64_f16:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzu x0, h0
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_i64_f16:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvt s0, h0
+; CHECK-SD-CVT-NEXT: fcvtzu x0, s0
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_i64_f16:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: fcvtzu x0, h0
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_i64_f16:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvt s0, h0
+; CHECK-GI-CVT-NEXT: fcvtzu x0, s0
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_i64_f16:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzu x0, h0
+; CHECK-GI-FP16-NEXT: ret
%x = call i64 @llvm.fptoui.sat.i64.f16(half %f)
ret i64 %x
}

define i100 @test_unsigned_i100_f16(half %f) nounwind {
-; CHECK-LABEL: test_unsigned_i100_f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: mov w8, #1904214015 // =0x717fffff
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: mov x10, #68719476735 // =0xfffffffff
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
-; CHECK-NEXT: csel x8, xzr, x0, lt
-; CHECK-NEXT: csel x9, xzr, x1, lt
-; CHECK-NEXT: fcmp s8, s0
-; CHECK-NEXT: csel x1, x10, x9, gt
-; CHECK-NEXT: csinv x0, x8, xzr, le
-; CHECK-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_unsigned_i100_f16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: mov w8, #1904214015 // =0x717fffff
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: mov x10, #68719476735 // =0xfffffffff
+; CHECK-SD-NEXT: fmov s0, w8
+; CHECK-SD-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
+; CHECK-SD-NEXT: csel x8, xzr, x0, lt
+; CHECK-SD-NEXT: csel x9, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp s8, s0
+; CHECK-SD-NEXT: csel x1, x10, x9, gt
+; CHECK-SD-NEXT: csinv x0, x8, xzr, le
+; CHECK-SD-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_i100_f16:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvt s0, h0
+; CHECK-GI-CVT-NEXT: mov x1, xzr
+; CHECK-GI-CVT-NEXT: fcvtzu x0, s0
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_i100_f16:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzu x0, h0
+; CHECK-GI-FP16-NEXT: mov x1, xzr
+; CHECK-GI-FP16-NEXT: ret
%x = call i100 @llvm.fptoui.sat.i100.f16(half %f)
ret i100 %x
}

define i128 @test_unsigned_i128_f16(half %f) nounwind {
-; CHECK-LABEL: test_unsigned_i128_f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: mov w8, #2139095039 // =0x7f7fffff
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: csel x8, xzr, x1, lt
-; CHECK-NEXT: csel x9, xzr, x0, lt
-; CHECK-NEXT: fcmp s8, s0
-; CHECK-NEXT: csinv x0, x9, xzr, le
-; CHECK-NEXT: csinv x1, x8, xzr, le
-; CHECK-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_unsigned_i128_f16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: str x30, [sp, #8] // 8-byte Folded Spill
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: mov w8, #2139095039 // =0x7f7fffff
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload
+; CHECK-SD-NEXT: fmov s0, w8
+; CHECK-SD-NEXT: csel x8, xzr, x1, lt
+; CHECK-SD-NEXT: csel x9, xzr, x0, lt
+; CHECK-SD-NEXT: fcmp s8, s0
+; CHECK-SD-NEXT: csinv x0, x9, xzr, le
+; CHECK-SD-NEXT: csinv x1, x8, xzr, le
+; CHECK-SD-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_i128_f16:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvt s0, h0
+; CHECK-GI-CVT-NEXT: mov x1, xzr
+; CHECK-GI-CVT-NEXT: fcvtzu x0, s0
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_i128_f16:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzu x0, h0
+; CHECK-GI-FP16-NEXT: mov x1, xzr
+; CHECK-GI-FP16-NEXT: ret
%x = call i128 @llvm.fptoui.sat.i128.f16(half %f)
ret i128 %x
}
+
+define i32 @test_unsigned_f128_i32(fp128 %f) {
+; CHECK-SD-LABEL: test_unsigned_f128_i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #32
+; CHECK-SD-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 32
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NEXT: adrp x8, .LCPI30_0
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI30_0]
+; CHECK-SD-NEXT: bl __getf2
+; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov w19, w0
+; CHECK-SD-NEXT: bl __fixunstfsi
+; CHECK-SD-NEXT: adrp x8, .LCPI30_1
+; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: cmp w19, #0
+; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI30_1]
+; CHECK-SD-NEXT: csel w19, wzr, w0, lt
+; CHECK-SD-NEXT: bl __gttf2
+; CHECK-SD-NEXT: cmp w0, #0
+; CHECK-SD-NEXT: csinv w0, w19, wzr, le
+; CHECK-SD-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: add sp, sp, #32
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_unsigned_f128_i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub sp, sp, #64
+; CHECK-GI-NEXT: stp d9, d8, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT: .cfi_offset w30, -16
+; CHECK-GI-NEXT: .cfi_offset b8, -24
+; CHECK-GI-NEXT: .cfi_offset b9, -32
+; CHECK-GI-NEXT: adrp x8, .LCPI30_1
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI30_1]
+; CHECK-GI-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill
+; CHECK-GI-NEXT: bl __getf2
+; CHECK-GI-NEXT: ldp q3, q2, [sp] // 32-byte Folded Reload
+; CHECK-GI-NEXT: cmp w0, #0
+; CHECK-GI-NEXT: mov d0, v3.d[1]
+; CHECK-GI-NEXT: mov d1, v2.d[1]
+; CHECK-GI-NEXT: fcsel d8, d3, d2, lt
+; CHECK-GI-NEXT: fmov x8, d8
+; CHECK-GI-NEXT: fcsel d9, d0, d1, lt
+; CHECK-GI-NEXT: mov v0.d[0], x8
+; CHECK-GI-NEXT: fmov x8, d9
+; CHECK-GI-NEXT: mov v0.d[1], x8
+; CHECK-GI-NEXT: adrp x8, .LCPI30_0
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI30_0]
+; CHECK-GI-NEXT: str q1, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: bl __gttf2
+; CHECK-GI-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: cmp w0, #0
+; CHECK-GI-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
+; CHECK-GI-NEXT: mov d0, v1.d[1]
+; CHECK-GI-NEXT: fcsel d1, d8, d1, gt
+; CHECK-GI-NEXT: fmov x8, d1
+; CHECK-GI-NEXT: fcsel d2, d9, d0, gt
+; CHECK-GI-NEXT: ldp d9, d8, [sp, #32] //
16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d2 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: b __fixunstfsi + %x = call i32 @llvm.fptoui.sat.i32.f128(fp128 %f) + ret i32 %x +} diff --git a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll index a3b94bc..40a8653 100644 --- a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll +++ b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll @@ -1,6 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT -; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16 +; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-CVT +; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16 +; RUN: llc < %s -mtriple=aarch64 -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-CVT +; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16 + +; CHECK-GI: warning: Instruction selection used fallback path for test_unsigned_v4f32_v4i50 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for test_unsigned_v4f16_v4i50 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for test_unsigned_v8f16_v8i19 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for test_unsigned_v8f16_v8i50 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for test_unsigned_v8f16_v8i128 ; ; Float to unsigned 32-bit -- Vector size variation @@ -16,10 +24,17 @@ declare <7 x i32> @llvm.fptoui.sat.v7f32.v7i32 (<7 x float>) declare <8 x i32> @llvm.fptoui.sat.v8f32.v8i32 (<8 x float>) define <1 x i32> @test_unsigned_v1f32_v1i32(<1 x float> %f) { -; CHECK-LABEL: test_unsigned_v1f32_v1i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzu v0.2s, v0.2s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v1f32_v1i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzu v0.2s, v0.2s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v1f32_v1i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzu w8, s0 +; CHECK-GI-NEXT: mov v0.s[0], w8 +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-NEXT: ret %x = call <1 x i32> @llvm.fptoui.sat.v1f32.v1i32(<1 x float> %f) ret <1 x i32> %x } @@ -52,79 +67,157 @@ define <4 x i32> @test_unsigned_v4f32_v4i32(<4 x float> %f) { } define <5 x i32> @test_unsigned_v5f32_v5i32(<5 x float> %f) { -; CHECK-LABEL: test_unsigned_v5f32_v5i32: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 -; CHECK-NEXT: // kill: def $s1 killed $s1 def $q1 -; CHECK-NEXT: // kill: def $s2 killed $s2 def $q2 -; CHECK-NEXT: // kill: def $s3 killed $s3 def $q3 -; CHECK-NEXT: // kill: def $s4 killed $s4 def $q4 -; CHECK-NEXT: mov v0.s[1], v1.s[0] -; CHECK-NEXT: fcvtzu v4.4s, v4.4s -; CHECK-NEXT: mov v0.s[2], v2.s[0] -; CHECK-NEXT: fmov w4, s4 -; CHECK-NEXT: mov v0.s[3], v3.s[0] -; CHECK-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-NEXT: mov w1, v0.s[1] -; CHECK-NEXT: mov w2, v0.s[2] -; CHECK-NEXT: mov w3, v0.s[3] -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v5f32_v5i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-SD-NEXT: // kill: def $s1 killed $s1 def $q1 +; 
CHECK-SD-NEXT: // kill: def $s2 killed $s2 def $q2 +; CHECK-SD-NEXT: // kill: def $s3 killed $s3 def $q3 +; CHECK-SD-NEXT: // kill: def $s4 killed $s4 def $q4 +; CHECK-SD-NEXT: mov v0.s[1], v1.s[0] +; CHECK-SD-NEXT: fcvtzu v4.4s, v4.4s +; CHECK-SD-NEXT: mov v0.s[2], v2.s[0] +; CHECK-SD-NEXT: fmov w4, s4 +; CHECK-SD-NEXT: mov v0.s[3], v3.s[0] +; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-SD-NEXT: mov w1, v0.s[1] +; CHECK-SD-NEXT: mov w2, v0.s[2] +; CHECK-SD-NEXT: mov w3, v0.s[3] +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v5f32_v5i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-GI-NEXT: // kill: def $s1 killed $s1 def $q1 +; CHECK-GI-NEXT: // kill: def $s2 killed $s2 def $q2 +; CHECK-GI-NEXT: // kill: def $s3 killed $s3 def $q3 +; CHECK-GI-NEXT: // kill: def $s4 killed $s4 def $q4 +; CHECK-GI-NEXT: mov v0.s[1], v1.s[0] +; CHECK-GI-NEXT: fcvtzu v1.4s, v4.4s +; CHECK-GI-NEXT: mov v0.s[2], v2.s[0] +; CHECK-GI-NEXT: fmov w4, s1 +; CHECK-GI-NEXT: mov v0.s[3], v3.s[0] +; CHECK-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-GI-NEXT: mov s2, v0.s[1] +; CHECK-GI-NEXT: mov s3, v0.s[2] +; CHECK-GI-NEXT: mov s4, v0.s[3] +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: fmov w1, s2 +; CHECK-GI-NEXT: fmov w2, s3 +; CHECK-GI-NEXT: fmov w3, s4 +; CHECK-GI-NEXT: ret %x = call <5 x i32> @llvm.fptoui.sat.v5f32.v5i32(<5 x float> %f) ret <5 x i32> %x } define <6 x i32> @test_unsigned_v6f32_v6i32(<6 x float> %f) { -; CHECK-LABEL: test_unsigned_v6f32_v6i32: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 -; CHECK-NEXT: // kill: def $s1 killed $s1 def $q1 -; CHECK-NEXT: // kill: def $s2 killed $s2 def $q2 -; CHECK-NEXT: // kill: def $s4 killed $s4 def $q4 -; CHECK-NEXT: // kill: def $s5 killed $s5 def $q5 -; CHECK-NEXT: // kill: def $s3 killed $s3 def $q3 -; CHECK-NEXT: mov v0.s[1], v1.s[0] -; CHECK-NEXT: mov v4.s[1], v5.s[0] -; CHECK-NEXT: mov v0.s[2], v2.s[0] -; CHECK-NEXT: fcvtzu v1.4s, v4.4s -; CHECK-NEXT: mov v0.s[3], v3.s[0] -; CHECK-NEXT: mov w5, v1.s[1] -; CHECK-NEXT: fmov w4, s1 -; CHECK-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-NEXT: mov w1, v0.s[1] -; CHECK-NEXT: mov w2, v0.s[2] -; CHECK-NEXT: mov w3, v0.s[3] -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v6f32_v6i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-SD-NEXT: // kill: def $s1 killed $s1 def $q1 +; CHECK-SD-NEXT: // kill: def $s2 killed $s2 def $q2 +; CHECK-SD-NEXT: // kill: def $s4 killed $s4 def $q4 +; CHECK-SD-NEXT: // kill: def $s5 killed $s5 def $q5 +; CHECK-SD-NEXT: // kill: def $s3 killed $s3 def $q3 +; CHECK-SD-NEXT: mov v0.s[1], v1.s[0] +; CHECK-SD-NEXT: mov v4.s[1], v5.s[0] +; CHECK-SD-NEXT: mov v0.s[2], v2.s[0] +; CHECK-SD-NEXT: fcvtzu v1.4s, v4.4s +; CHECK-SD-NEXT: mov v0.s[3], v3.s[0] +; CHECK-SD-NEXT: mov w5, v1.s[1] +; CHECK-SD-NEXT: fmov w4, s1 +; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-SD-NEXT: mov w1, v0.s[1] +; CHECK-SD-NEXT: mov w2, v0.s[2] +; CHECK-SD-NEXT: mov w3, v0.s[3] +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v6f32_v6i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-GI-NEXT: // kill: def $s1 killed $s1 def $q1 +; CHECK-GI-NEXT: // kill: def $s2 killed $s2 def $q2 +; CHECK-GI-NEXT: // kill: def $s4 killed $s4 def $q4 +; CHECK-GI-NEXT: // kill: def $s3 killed $s3 def $q3 +; CHECK-GI-NEXT: // kill: def $s5 killed $s5 def $q5 +; CHECK-GI-NEXT: mov v0.s[1], v1.s[0] +; 
CHECK-GI-NEXT: mov v4.s[1], v5.s[0] +; CHECK-GI-NEXT: mov v0.s[2], v2.s[0] +; CHECK-GI-NEXT: fcvtzu v1.4s, v4.4s +; CHECK-GI-NEXT: mov v0.s[3], v3.s[0] +; CHECK-GI-NEXT: mov s4, v1.s[1] +; CHECK-GI-NEXT: fmov w4, s1 +; CHECK-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-GI-NEXT: fmov w5, s4 +; CHECK-GI-NEXT: mov s2, v0.s[1] +; CHECK-GI-NEXT: mov s3, v0.s[2] +; CHECK-GI-NEXT: mov s5, v0.s[3] +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: fmov w1, s2 +; CHECK-GI-NEXT: fmov w2, s3 +; CHECK-GI-NEXT: fmov w3, s5 +; CHECK-GI-NEXT: ret %x = call <6 x i32> @llvm.fptoui.sat.v6f32.v6i32(<6 x float> %f) ret <6 x i32> %x } define <7 x i32> @test_unsigned_v7f32_v7i32(<7 x float> %f) { -; CHECK-LABEL: test_unsigned_v7f32_v7i32: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 -; CHECK-NEXT: // kill: def $s1 killed $s1 def $q1 -; CHECK-NEXT: // kill: def $s4 killed $s4 def $q4 -; CHECK-NEXT: // kill: def $s5 killed $s5 def $q5 -; CHECK-NEXT: // kill: def $s2 killed $s2 def $q2 -; CHECK-NEXT: // kill: def $s6 killed $s6 def $q6 -; CHECK-NEXT: // kill: def $s3 killed $s3 def $q3 -; CHECK-NEXT: mov v0.s[1], v1.s[0] -; CHECK-NEXT: mov v4.s[1], v5.s[0] -; CHECK-NEXT: mov v0.s[2], v2.s[0] -; CHECK-NEXT: mov v4.s[2], v6.s[0] -; CHECK-NEXT: mov v0.s[3], v3.s[0] -; CHECK-NEXT: fcvtzu v1.4s, v4.4s -; CHECK-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-NEXT: mov w5, v1.s[1] -; CHECK-NEXT: mov w6, v1.s[2] -; CHECK-NEXT: fmov w4, s1 -; CHECK-NEXT: mov w1, v0.s[1] -; CHECK-NEXT: mov w2, v0.s[2] -; CHECK-NEXT: mov w3, v0.s[3] -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v7f32_v7i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-SD-NEXT: // kill: def $s1 killed $s1 def $q1 +; CHECK-SD-NEXT: // kill: def $s4 killed $s4 def $q4 +; CHECK-SD-NEXT: // kill: def $s5 killed $s5 def $q5 +; CHECK-SD-NEXT: // kill: def $s2 killed $s2 def $q2 +; CHECK-SD-NEXT: // kill: def $s6 killed $s6 def $q6 +; CHECK-SD-NEXT: // kill: def $s3 killed $s3 def $q3 +; CHECK-SD-NEXT: mov v0.s[1], v1.s[0] +; CHECK-SD-NEXT: mov v4.s[1], v5.s[0] +; CHECK-SD-NEXT: mov v0.s[2], v2.s[0] +; CHECK-SD-NEXT: mov v4.s[2], v6.s[0] +; CHECK-SD-NEXT: mov v0.s[3], v3.s[0] +; CHECK-SD-NEXT: fcvtzu v1.4s, v4.4s +; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-SD-NEXT: mov w5, v1.s[1] +; CHECK-SD-NEXT: mov w6, v1.s[2] +; CHECK-SD-NEXT: fmov w4, s1 +; CHECK-SD-NEXT: mov w1, v0.s[1] +; CHECK-SD-NEXT: mov w2, v0.s[2] +; CHECK-SD-NEXT: mov w3, v0.s[3] +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v7f32_v7i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-GI-NEXT: // kill: def $s1 killed $s1 def $q1 +; CHECK-GI-NEXT: // kill: def $s4 killed $s4 def $q4 +; CHECK-GI-NEXT: // kill: def $s2 killed $s2 def $q2 +; CHECK-GI-NEXT: // kill: def $s5 killed $s5 def $q5 +; CHECK-GI-NEXT: // kill: def $s3 killed $s3 def $q3 +; CHECK-GI-NEXT: // kill: def $s6 killed $s6 def $q6 +; CHECK-GI-NEXT: mov v0.s[1], v1.s[0] +; CHECK-GI-NEXT: mov v4.s[1], v5.s[0] +; CHECK-GI-NEXT: mov v0.s[2], v2.s[0] +; CHECK-GI-NEXT: mov v4.s[2], v6.s[0] +; CHECK-GI-NEXT: mov v0.s[3], v3.s[0] +; CHECK-GI-NEXT: fcvtzu v1.4s, v4.4s +; CHECK-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-GI-NEXT: mov s5, v1.s[1] +; CHECK-GI-NEXT: mov s6, v1.s[2] +; CHECK-GI-NEXT: fmov w4, s1 +; CHECK-GI-NEXT: mov s2, v0.s[1] +; CHECK-GI-NEXT: mov s3, v0.s[2] +; CHECK-GI-NEXT: mov s4, v0.s[3] +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: fmov w5, s5 +; CHECK-GI-NEXT: fmov w6, s6 
+; CHECK-GI-NEXT: fmov w1, s2 +; CHECK-GI-NEXT: fmov w2, s3 +; CHECK-GI-NEXT: fmov w3, s4 +; CHECK-GI-NEXT: ret %x = call <7 x i32> @llvm.fptoui.sat.v7f32.v7i32(<7 x float> %f) ret <7 x i32> %x } @@ -151,86 +244,201 @@ declare <5 x i32> @llvm.fptoui.sat.v5f64.v5i32 (<5 x double>) declare <6 x i32> @llvm.fptoui.sat.v6f64.v6i32 (<6 x double>) define <1 x i32> @test_unsigned_v1f64_v1i32(<1 x double> %f) { -; CHECK-LABEL: test_unsigned_v1f64_v1i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzu w8, d0 -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v1f64_v1i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzu w8, d0 +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v1f64_v1i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzu w8, d0 +; CHECK-GI-NEXT: mov v0.s[0], w8 +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-NEXT: ret %x = call <1 x i32> @llvm.fptoui.sat.v1f64.v1i32(<1 x double> %f) ret <1 x i32> %x } define <2 x i32> @test_unsigned_v2f64_v2i32(<2 x double> %f) { -; CHECK-LABEL: test_unsigned_v2f64_v2i32: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: fcvtzu w8, d0 -; CHECK-NEXT: fcvtzu w9, d1 -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w9 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v2f64_v2i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: fcvtzu w8, d0 +; CHECK-SD-NEXT: fcvtzu w9, d1 +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w9 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v2f64_v2i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v1.2d, #0x000000ffffffff +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: cmhi v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: ret %x = call <2 x i32> @llvm.fptoui.sat.v2f64.v2i32(<2 x double> %f) ret <2 x i32> %x } define <3 x i32> @test_unsigned_v3f64_v3i32(<3 x double> %f) { -; CHECK-LABEL: test_unsigned_v3f64_v3i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzu w8, d0 -; CHECK-NEXT: fcvtzu w9, d1 -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: fcvtzu w8, d2 -; CHECK-NEXT: mov v0.s[1], w9 -; CHECK-NEXT: mov v0.s[2], w8 -; CHECK-NEXT: fcvtzu w8, d0 -; CHECK-NEXT: mov v0.s[3], w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v3f64_v3i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzu w8, d0 +; CHECK-SD-NEXT: fcvtzu w9, d1 +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: fcvtzu w8, d2 +; CHECK-SD-NEXT: mov v0.s[1], w9 +; CHECK-SD-NEXT: mov v0.s[2], w8 +; CHECK-SD-NEXT: fcvtzu w8, d0 +; CHECK-SD-NEXT: mov v0.s[3], w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v3f64_v3i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1 +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: mov v0.d[1], v1.d[0] +; CHECK-GI-NEXT: fcvtzu v1.2d, v2.2d +; CHECK-GI-NEXT: movi v2.2d, #0x000000ffffffff +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: cmhi v4.2d, v2.2d, v1.2d +; CHECK-GI-NEXT: bif v1.16b, v2.16b, v4.16b +; CHECK-GI-NEXT: cmhi v3.2d, v2.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v2.16b, v3.16b +; CHECK-GI-NEXT: uzp1 v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: ret %x = call <3 x i32> @llvm.fptoui.sat.v3f64.v3i32(<3 x double> %f) ret <3 x i32> %x } define <4 x i32> @test_unsigned_v4f64_v4i32(<4 x double> 
%f) { -; CHECK-LABEL: test_unsigned_v4f64_v4i32: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d2, v0.d[1] -; CHECK-NEXT: fcvtzu w8, d0 -; CHECK-NEXT: fcvtzu w9, d2 -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: fcvtzu w8, d1 -; CHECK-NEXT: mov d1, v1.d[1] -; CHECK-NEXT: mov v0.s[1], w9 -; CHECK-NEXT: mov v0.s[2], w8 -; CHECK-NEXT: fcvtzu w8, d1 -; CHECK-NEXT: mov v0.s[3], w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v4f64_v4i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d2, v0.d[1] +; CHECK-SD-NEXT: fcvtzu w8, d0 +; CHECK-SD-NEXT: fcvtzu w9, d2 +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: fcvtzu w8, d1 +; CHECK-SD-NEXT: mov d1, v1.d[1] +; CHECK-SD-NEXT: mov v0.s[1], w9 +; CHECK-SD-NEXT: mov v0.s[2], w8 +; CHECK-SD-NEXT: fcvtzu w8, d1 +; CHECK-SD-NEXT: mov v0.s[3], w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v4f64_v4i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v2.2d, #0x000000ffffffff +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: fcvtzu v1.2d, v1.2d +; CHECK-GI-NEXT: cmhi v3.2d, v2.2d, v0.2d +; CHECK-GI-NEXT: cmhi v4.2d, v2.2d, v1.2d +; CHECK-GI-NEXT: bif v0.16b, v2.16b, v3.16b +; CHECK-GI-NEXT: bif v1.16b, v2.16b, v4.16b +; CHECK-GI-NEXT: uzp1 v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: ret %x = call <4 x i32> @llvm.fptoui.sat.v4f64.v4i32(<4 x double> %f) ret <4 x i32> %x } define <5 x i32> @test_unsigned_v5f64_v5i32(<5 x double> %f) { -; CHECK-LABEL: test_unsigned_v5f64_v5i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzu w0, d0 -; CHECK-NEXT: fcvtzu w1, d1 -; CHECK-NEXT: fcvtzu w2, d2 -; CHECK-NEXT: fcvtzu w3, d3 -; CHECK-NEXT: fcvtzu w4, d4 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v5f64_v5i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzu w0, d0 +; CHECK-SD-NEXT: fcvtzu w1, d1 +; CHECK-SD-NEXT: fcvtzu w2, d2 +; CHECK-SD-NEXT: fcvtzu w3, d3 +; CHECK-SD-NEXT: fcvtzu w4, d4 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v5f64_v5i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1 +; CHECK-GI-NEXT: // kill: def $d3 killed $d3 def $q3 +; CHECK-GI-NEXT: // kill: def $d4 killed $d4 def $q4 +; CHECK-GI-NEXT: mov v0.d[1], v1.d[0] +; CHECK-GI-NEXT: mov v2.d[1], v3.d[0] +; CHECK-GI-NEXT: movi v1.2d, #0x000000ffffffff +; CHECK-GI-NEXT: fcvtzu v3.2d, v4.2d +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: fcvtzu v2.2d, v2.2d +; CHECK-GI-NEXT: cmhi v4.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: cmhi v5.2d, v1.2d, v2.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v4.16b +; CHECK-GI-NEXT: bif v2.16b, v1.16b, v5.16b +; CHECK-GI-NEXT: cmhi v4.2d, v1.2d, v3.2d +; CHECK-GI-NEXT: bit v1.16b, v3.16b, v4.16b +; CHECK-GI-NEXT: mov d3, v0.d[1] +; CHECK-GI-NEXT: mov d4, v2.d[1] +; CHECK-GI-NEXT: fmov x0, d0 +; CHECK-GI-NEXT: fmov x2, d2 +; CHECK-GI-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-GI-NEXT: // kill: def $w2 killed $w2 killed $x2 +; CHECK-GI-NEXT: fmov x4, d1 +; CHECK-GI-NEXT: fmov x1, d3 +; CHECK-GI-NEXT: fmov x3, d4 +; CHECK-GI-NEXT: // kill: def $w4 killed $w4 killed $x4 +; CHECK-GI-NEXT: // kill: def $w1 killed $w1 killed $x1 +; CHECK-GI-NEXT: // kill: def $w3 killed $w3 killed $x3 +; CHECK-GI-NEXT: ret %x = call <5 x i32> @llvm.fptoui.sat.v5f64.v5i32(<5 x double> %f) ret <5 x i32> %x } define <6 x i32> @test_unsigned_v6f64_v6i32(<6 x double> %f) { -; CHECK-LABEL: test_unsigned_v6f64_v6i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzu w0, d0 -; CHECK-NEXT: fcvtzu w1, d1 -; CHECK-NEXT: fcvtzu w2, d2 -; CHECK-NEXT: 
fcvtzu w3, d3 -; CHECK-NEXT: fcvtzu w4, d4 -; CHECK-NEXT: fcvtzu w5, d5 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v6f64_v6i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzu w0, d0 +; CHECK-SD-NEXT: fcvtzu w1, d1 +; CHECK-SD-NEXT: fcvtzu w2, d2 +; CHECK-SD-NEXT: fcvtzu w3, d3 +; CHECK-SD-NEXT: fcvtzu w4, d4 +; CHECK-SD-NEXT: fcvtzu w5, d5 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v6f64_v6i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: // kill: def $d4 killed $d4 def $q4 +; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1 +; CHECK-GI-NEXT: // kill: def $d3 killed $d3 def $q3 +; CHECK-GI-NEXT: // kill: def $d5 killed $d5 def $q5 +; CHECK-GI-NEXT: mov v0.d[1], v1.d[0] +; CHECK-GI-NEXT: mov v2.d[1], v3.d[0] +; CHECK-GI-NEXT: mov v4.d[1], v5.d[0] +; CHECK-GI-NEXT: movi v1.2d, #0x000000ffffffff +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: fcvtzu v2.2d, v2.2d +; CHECK-GI-NEXT: fcvtzu v3.2d, v4.2d +; CHECK-GI-NEXT: cmhi v4.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: cmhi v5.2d, v1.2d, v2.2d +; CHECK-GI-NEXT: cmhi v6.2d, v1.2d, v3.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v4.16b +; CHECK-GI-NEXT: bif v2.16b, v1.16b, v5.16b +; CHECK-GI-NEXT: bit v1.16b, v3.16b, v6.16b +; CHECK-GI-NEXT: mov d3, v0.d[1] +; CHECK-GI-NEXT: mov d4, v2.d[1] +; CHECK-GI-NEXT: mov d5, v1.d[1] +; CHECK-GI-NEXT: fmov x0, d0 +; CHECK-GI-NEXT: fmov x2, d2 +; CHECK-GI-NEXT: fmov x4, d1 +; CHECK-GI-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-GI-NEXT: // kill: def $w2 killed $w2 killed $x2 +; CHECK-GI-NEXT: // kill: def $w4 killed $w4 killed $x4 +; CHECK-GI-NEXT: fmov x1, d3 +; CHECK-GI-NEXT: fmov x3, d4 +; CHECK-GI-NEXT: fmov x5, d5 +; CHECK-GI-NEXT: // kill: def $w1 killed $w1 killed $x1 +; CHECK-GI-NEXT: // kill: def $w3 killed $w3 killed $x3 +; CHECK-GI-NEXT: // kill: def $w5 killed $w5 killed $x5 +; CHECK-GI-NEXT: ret %x = call <6 x i32> @llvm.fptoui.sat.v6f64.v6i32(<6 x double> %f) ret <6 x i32> %x } @@ -245,237 +453,592 @@ declare <3 x i32> @llvm.fptoui.sat.v3f128.v3i32 (<3 x fp128>) declare <4 x i32> @llvm.fptoui.sat.v4f128.v4i32 (<4 x fp128>) define <1 x i32> @test_unsigned_v1f128_v1i32(<1 x fp128> %f) { -; CHECK-LABEL: test_unsigned_v1f128_v1i32: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w30, -16 -; CHECK-NEXT: adrp x8, .LCPI14_0 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI14_0] -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixunstfsi -; CHECK-NEXT: adrp x8, .LCPI14_1 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI14_1] -; CHECK-NEXT: csel w19, wzr, w0, lt -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csinv w8, w19, wzr, le -; CHECK-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: add sp, sp, #32 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v1f128_v1i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #32 +; CHECK-SD-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 32 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w30, -16 +; CHECK-SD-NEXT: adrp x8, .LCPI14_0 +; 
CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI14_0] +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixunstfsi +; CHECK-SD-NEXT: adrp x8, .LCPI14_1 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI14_1] +; CHECK-SD-NEXT: csel w19, wzr, w0, lt +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csinv w8, w19, wzr, le +; CHECK-SD-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: add sp, sp, #32 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v1f128_v1i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #64 +; CHECK-GI-NEXT: stp d9, d8, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #48] // 8-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 +; CHECK-GI-NEXT: .cfi_offset w30, -16 +; CHECK-GI-NEXT: .cfi_offset b8, -24 +; CHECK-GI-NEXT: .cfi_offset b9, -32 +; CHECK-GI-NEXT: adrp x8, .LCPI14_1 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI14_1] +; CHECK-GI-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q3, q2, [sp] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v3.d[1] +; CHECK-GI-NEXT: mov d1, v2.d[1] +; CHECK-GI-NEXT: fcsel d8, d3, d2, lt +; CHECK-GI-NEXT: fmov x8, d8 +; CHECK-GI-NEXT: fcsel d9, d0, d1, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d9 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: adrp x8, .LCPI14_0 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI14_0] +; CHECK-GI-NEXT: str q1, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: fcsel d1, d8, d1, gt +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: fcsel d2, d9, d0, gt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d2 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixunstfsi +; CHECK-GI-NEXT: ldp d9, d8, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[0], w0 +; CHECK-GI-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: ret %x = call <1 x i32> @llvm.fptoui.sat.v1f128.v1i32(<1 x fp128> %f) ret <1 x i32> %x } define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) { -; CHECK-LABEL: test_unsigned_v2f128_v2i32: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #96 -; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 96 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w30, -32 -; CHECK-NEXT: mov v2.16b, v1.16b -; CHECK-NEXT: stp q1, q0, [sp, #32] // 32-byte Folded Spill -; CHECK-NEXT: adrp x8, .LCPI15_0 -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI15_0] -; CHECK-NEXT: mov v0.16b, v2.16b -; CHECK-NEXT: str q1, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixunstfsi -; CHECK-NEXT: adrp x8, .LCPI15_1 -; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI15_1] -; CHECK-NEXT: csel w19, wzr, 
w0, lt -; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csinv w20, w19, wzr, le -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixunstfsi -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: csel w19, wzr, w0, lt -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload -; CHECK-NEXT: csinv w8, w19, wzr, le -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w20 -; CHECK-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: add sp, sp, #96 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v2f128_v2i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #96 +; CHECK-SD-NEXT: str x30, [sp, #64] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 96 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w30, -32 +; CHECK-SD-NEXT: mov v2.16b, v1.16b +; CHECK-SD-NEXT: stp q1, q0, [sp, #32] // 32-byte Folded Spill +; CHECK-SD-NEXT: adrp x8, .LCPI15_0 +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI15_0] +; CHECK-SD-NEXT: mov v0.16b, v2.16b +; CHECK-SD-NEXT: str q1, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixunstfsi +; CHECK-SD-NEXT: adrp x8, .LCPI15_1 +; CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI15_1] +; CHECK-SD-NEXT: csel w19, wzr, w0, lt +; CHECK-SD-NEXT: str q1, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csinv w20, w19, wzr, le +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixunstfsi +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: csel w19, wzr, w0, lt +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload +; CHECK-SD-NEXT: csinv w8, w19, wzr, le +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w20 +; CHECK-SD-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: add sp, sp, #96 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v2f128_v2i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #96 +; CHECK-GI-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 96 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w30, -16 +; CHECK-GI-NEXT: .cfi_offset b8, -24 +; CHECK-GI-NEXT: .cfi_offset b9, -32 +; CHECK-GI-NEXT: .cfi_offset b10, 
-40 +; CHECK-GI-NEXT: .cfi_offset b11, -48 +; CHECK-GI-NEXT: adrp x8, .LCPI15_1 +; CHECK-GI-NEXT: str q1, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI15_1] +; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: mov v1.16b, v2.16b +; CHECK-GI-NEXT: str q2, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q2, q1, [sp, #16] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v2.d[1] +; CHECK-GI-NEXT: mov d8, v1.d[1] +; CHECK-GI-NEXT: fcsel d9, d2, d1, lt +; CHECK-GI-NEXT: fmov x8, d9 +; CHECK-GI-NEXT: fcsel d10, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: adrp x8, .LCPI15_0 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI15_0] +; CHECK-GI-NEXT: str q1, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d11, v0.d[1] +; CHECK-GI-NEXT: fcsel d0, d9, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: fcsel d1, d10, d11, gt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixunstfsi +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w19, w0 +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q3, q1, [sp] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: ldr q2, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov d0, v3.d[1] +; CHECK-GI-NEXT: fcsel d9, d3, d2, lt +; CHECK-GI-NEXT: fmov x8, d9 +; CHECK-GI-NEXT: fcsel d8, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d8 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: fcsel d1, d8, d11, gt +; CHECK-GI-NEXT: fcsel d0, d9, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixunstfsi +; CHECK-GI-NEXT: mov v0.s[0], w19 +; CHECK-GI-NEXT: ldp x30, x19, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[1], w0 +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-NEXT: add sp, sp, #96 +; CHECK-GI-NEXT: ret %x = call <2 x i32> @llvm.fptoui.sat.v2f128.v2i32(<2 x fp128> %f) ret <2 x i32> %x } define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) { -; CHECK-LABEL: test_unsigned_v3f128_v3i32: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #112 -; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 112 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w30, -32 -; CHECK-NEXT: stp q0, q2, [sp, #48] // 32-byte Folded Spill -; CHECK-NEXT: mov v2.16b, v1.16b -; CHECK-NEXT: adrp x8, .LCPI16_0 -; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI16_0] -; CHECK-NEXT: mov v0.16b, v2.16b -; CHECK-NEXT: str q1, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixunstfsi -; 
CHECK-NEXT: adrp x8, .LCPI16_1 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI16_1] -; CHECK-NEXT: csel w19, wzr, w0, lt -; CHECK-NEXT: str q1, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: ldp q1, q0, [sp, #32] // 32-byte Folded Reload -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csinv w20, w19, wzr, le -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixunstfsi -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: csel w19, wzr, w0, lt -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: ldr q1, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: csinv w8, w19, wzr, le -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w20 -; CHECK-NEXT: str q0, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixunstfsi -; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: csel w19, wzr, w0, lt -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload -; CHECK-NEXT: csinv w8, w19, wzr, le -; CHECK-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload -; CHECK-NEXT: mov v0.s[2], w8 -; CHECK-NEXT: add sp, sp, #112 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v3f128_v3i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #112 +; CHECK-SD-NEXT: str x30, [sp, #80] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 112 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w30, -32 +; CHECK-SD-NEXT: stp q0, q2, [sp, #48] // 32-byte Folded Spill +; CHECK-SD-NEXT: mov v2.16b, v1.16b +; CHECK-SD-NEXT: adrp x8, .LCPI16_0 +; CHECK-SD-NEXT: str q1, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI16_0] +; CHECK-SD-NEXT: mov v0.16b, v2.16b +; CHECK-SD-NEXT: str q1, [sp, #32] // 16-byte Folded Spill +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixunstfsi +; CHECK-SD-NEXT: adrp x8, .LCPI16_1 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI16_1] +; CHECK-SD-NEXT: csel w19, wzr, w0, lt +; CHECK-SD-NEXT: str q1, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: ldp q1, q0, [sp, #32] // 32-byte Folded Reload +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csinv w20, w19, wzr, le +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixunstfsi +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: csel w19, wzr, w0, lt +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: ldr q1, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: csinv w8, w19, wzr, le +; 
CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w20 +; CHECK-SD-NEXT: str q0, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixunstfsi +; CHECK-SD-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: csel w19, wzr, w0, lt +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload +; CHECK-SD-NEXT: csinv w8, w19, wzr, le +; CHECK-SD-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov v0.s[2], w8 +; CHECK-SD-NEXT: add sp, sp, #112 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v3f128_v3i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #128 +; CHECK-GI-NEXT: stp d11, d10, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #96] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 128 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w30, -32 +; CHECK-GI-NEXT: .cfi_offset b8, -40 +; CHECK-GI-NEXT: .cfi_offset b9, -48 +; CHECK-GI-NEXT: .cfi_offset b10, -56 +; CHECK-GI-NEXT: .cfi_offset b11, -64 +; CHECK-GI-NEXT: adrp x8, .LCPI16_1 +; CHECK-GI-NEXT: str q1, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI16_1] +; CHECK-GI-NEXT: str q0, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q2, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q1, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q2, q1, [sp, #32] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v2.d[1] +; CHECK-GI-NEXT: mov d8, v1.d[1] +; CHECK-GI-NEXT: fcsel d10, d2, d1, lt +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: fcsel d11, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d11 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: adrp x8, .LCPI16_0 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI16_0] +; CHECK-GI-NEXT: str q1, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d9, v0.d[1] +; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: fcsel d1, d11, d9, gt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixunstfsi +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w19, w0 +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q1, q3, [sp, #32] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: ldr q2, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov d0, v2.d[1] +; CHECK-GI-NEXT: fcsel d10, d2, d3, lt +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: fcsel d11, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d11 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: 
fcsel d1, d11, d9, gt +; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixunstfsi +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w20, w0 +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q4, q1, [sp, #16] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: ldr q2, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov d0, v4.d[1] +; CHECK-GI-NEXT: fcsel d10, d4, d2, lt +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: fcsel d8, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d8 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: fcsel d1, d8, d9, gt +; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixunstfsi +; CHECK-GI-NEXT: mov v0.s[0], w19 +; CHECK-GI-NEXT: ldp d9, d8, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d11, d10, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[1], w20 +; CHECK-GI-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[2], w0 +; CHECK-GI-NEXT: add sp, sp, #128 +; CHECK-GI-NEXT: ret %x = call <3 x i32> @llvm.fptoui.sat.v3f128.v3i32(<3 x fp128> %f) ret <3 x i32> %x } define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) { -; CHECK-LABEL: test_unsigned_v4f128_v4i32: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #128 -; CHECK-NEXT: str x30, [sp, #96] // 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 128 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w30, -32 -; CHECK-NEXT: stp q0, q2, [sp, #16] // 32-byte Folded Spill -; CHECK-NEXT: mov v2.16b, v1.16b -; CHECK-NEXT: adrp x8, .LCPI17_0 -; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI17_0] -; CHECK-NEXT: str q3, [sp, #80] // 16-byte Folded Spill -; CHECK-NEXT: mov v0.16b, v2.16b -; CHECK-NEXT: str q1, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixunstfsi -; CHECK-NEXT: adrp x8, .LCPI17_1 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI17_1] -; CHECK-NEXT: csel w19, wzr, w0, lt -; CHECK-NEXT: str q1, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: csinv w20, w19, wzr, le -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixunstfsi -; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: csel w19, wzr, w0, lt -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: csinv w8, w19, wzr, le -; CHECK-NEXT: 
fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w20 -; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixunstfsi -; CHECK-NEXT: ldp q0, q1, [sp, #32] // 32-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: csel w19, wzr, w0, lt -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: csinv w8, w19, wzr, le -; CHECK-NEXT: mov v0.s[2], w8 -; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: ldp q1, q0, [sp, #64] // 32-byte Folded Reload -; CHECK-NEXT: bl __getf2 -; CHECK-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: bl __fixunstfsi -; CHECK-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: cmp w19, #0 -; CHECK-NEXT: csel w19, wzr, w0, lt -; CHECK-NEXT: bl __gttf2 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload -; CHECK-NEXT: csinv w8, w19, wzr, le -; CHECK-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload -; CHECK-NEXT: mov v0.s[3], w8 -; CHECK-NEXT: add sp, sp, #128 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v4f128_v4i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #128 +; CHECK-SD-NEXT: str x30, [sp, #96] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 128 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w30, -32 +; CHECK-SD-NEXT: stp q0, q2, [sp, #16] // 32-byte Folded Spill +; CHECK-SD-NEXT: mov v2.16b, v1.16b +; CHECK-SD-NEXT: adrp x8, .LCPI17_0 +; CHECK-SD-NEXT: str q1, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI17_0] +; CHECK-SD-NEXT: str q3, [sp, #80] // 16-byte Folded Spill +; CHECK-SD-NEXT: mov v0.16b, v2.16b +; CHECK-SD-NEXT: str q1, [sp, #64] // 16-byte Folded Spill +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixunstfsi +; CHECK-SD-NEXT: adrp x8, .LCPI17_1 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI17_1] +; CHECK-SD-NEXT: csel w19, wzr, w0, lt +; CHECK-SD-NEXT: str q1, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: csinv w20, w19, wzr, le +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixunstfsi +; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: csel w19, wzr, w0, lt +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: csinv w8, w19, wzr, le +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w20 +; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: bl __getf2 +; 
CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixunstfsi +; CHECK-SD-NEXT: ldp q0, q1, [sp, #32] // 32-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: csel w19, wzr, w0, lt +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: csinv w8, w19, wzr, le +; CHECK-SD-NEXT: mov v0.s[2], w8 +; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: ldp q1, q0, [sp, #64] // 32-byte Folded Reload +; CHECK-SD-NEXT: bl __getf2 +; CHECK-SD-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w19, w0 +; CHECK-SD-NEXT: bl __fixunstfsi +; CHECK-SD-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: cmp w19, #0 +; CHECK-SD-NEXT: csel w19, wzr, w0, lt +; CHECK-SD-NEXT: bl __gttf2 +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload +; CHECK-SD-NEXT: csinv w8, w19, wzr, le +; CHECK-SD-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov v0.s[3], w8 +; CHECK-SD-NEXT: add sp, sp, #128 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v4f128_v4i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #144 +; CHECK-GI-NEXT: stp d11, d10, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x30, x21, [sp, #112] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #128] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 144 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w30, -32 +; CHECK-GI-NEXT: .cfi_offset b8, -40 +; CHECK-GI-NEXT: .cfi_offset b9, -48 +; CHECK-GI-NEXT: .cfi_offset b10, -56 +; CHECK-GI-NEXT: .cfi_offset b11, -64 +; CHECK-GI-NEXT: adrp x8, .LCPI17_1 +; CHECK-GI-NEXT: stp q1, q2, [sp] // 32-byte Folded Spill +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI17_1] +; CHECK-GI-NEXT: str q0, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q3, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q1, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q2, q1, [sp, #48] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d0, v2.d[1] +; CHECK-GI-NEXT: mov d8, v1.d[1] +; CHECK-GI-NEXT: fcsel d10, d2, d1, lt +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: fcsel d11, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d11 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: adrp x8, .LCPI17_0 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI17_0] +; CHECK-GI-NEXT: str q1, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: mov d9, v0.d[1] +; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: fcsel d1, d11, d9, gt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixunstfsi +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w19, w0 +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q1, q4, [sp, #48] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp 
w0, #0 +; CHECK-GI-NEXT: ldr q2, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov d0, v2.d[1] +; CHECK-GI-NEXT: fcsel d10, d2, d4, lt +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: fcsel d11, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d11 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: fcsel d1, d11, d9, gt +; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixunstfsi +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w20, w0 +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q1, q5, [sp, #48] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov d0, v2.d[1] +; CHECK-GI-NEXT: fcsel d10, d2, d5, lt +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: fcsel d11, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d11 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: fcsel d1, d11, d9, gt +; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixunstfsi +; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w21, w0 +; CHECK-GI-NEXT: bl __getf2 +; CHECK-GI-NEXT: ldp q6, q1, [sp, #32] // 32-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: ldr q2, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov d0, v6.d[1] +; CHECK-GI-NEXT: fcsel d10, d6, d2, lt +; CHECK-GI-NEXT: fmov x8, d10 +; CHECK-GI-NEXT: fcsel d8, d0, d8, lt +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d8 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: fcsel d1, d8, d9, gt +; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: mov v0.d[0], x8 +; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: bl __fixunstfsi +; CHECK-GI-NEXT: mov v0.s[0], w19 +; CHECK-GI-NEXT: ldp d9, d8, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d11, d10, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[1], w20 +; CHECK-GI-NEXT: ldp x20, x19, [sp, #128] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[2], w21 +; CHECK-GI-NEXT: ldp x30, x21, [sp, #112] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[3], w0 +; CHECK-GI-NEXT: add sp, sp, #144 +; CHECK-GI-NEXT: ret %x = call <4 x i32> @llvm.fptoui.sat.v4f128.v4i32(<4 x fp128> %f) ret <4 x i32> %x } @@ -494,29 +1057,53 @@ declare <7 x i32> @llvm.fptoui.sat.v7f16.v7i32 (<7 x half>) declare <8 x i32> @llvm.fptoui.sat.v8f16.v8i32 (<8 x half>) define <1 x i32> @test_unsigned_v1f16_v1i32(<1 x half> %f) { -; CHECK-CVT-LABEL: test_unsigned_v1f16_v1i32: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvt s0, h0 -; CHECK-CVT-NEXT: fcvtzu w8, s0 -; CHECK-CVT-NEXT: fmov s0, w8 -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_unsigned_v1f16_v1i32: -; CHECK-FP16: 
// %bb.0: -; CHECK-FP16-NEXT: fcvtzu w8, h0 -; CHECK-FP16-NEXT: fmov s0, w8 -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_unsigned_v1f16_v1i32: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvt s0, h0 +; CHECK-SD-CVT-NEXT: fcvtzu w8, s0 +; CHECK-SD-CVT-NEXT: fmov s0, w8 +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_unsigned_v1f16_v1i32: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzu w8, h0 +; CHECK-SD-FP16-NEXT: fmov s0, w8 +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_unsigned_v1f16_v1i32: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: fcvtzu w8, s0 +; CHECK-GI-CVT-NEXT: mov v0.s[0], w8 +; CHECK-GI-CVT-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_unsigned_v1f16_v1i32: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzu w8, h0 +; CHECK-GI-FP16-NEXT: mov v0.s[0], w8 +; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-FP16-NEXT: ret %x = call <1 x i32> @llvm.fptoui.sat.v1f16.v1i32(<1 x half> %f) ret <1 x i32> %x } define <2 x i32> @test_unsigned_v2f16_v2i32(<2 x half> %f) { -; CHECK-LABEL: test_unsigned_v2f16_v2i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl v0.4s, v0.4h -; CHECK-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v2f16_v2i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v2f16_v2i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: mov h1, v0.h[1] +; CHECK-GI-NEXT: mov v0.h[1], v1.h[0] +; CHECK-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-GI-NEXT: fcvtzu v0.2s, v0.2s +; CHECK-GI-NEXT: ret %x = call <2 x i32> @llvm.fptoui.sat.v2f16.v2i32(<2 x half> %f) ret <2 x i32> %x } @@ -542,67 +1129,135 @@ define <4 x i32> @test_unsigned_v4f16_v4i32(<4 x half> %f) { } define <5 x i32> @test_unsigned_v5f16_v5i32(<5 x half> %f) { -; CHECK-LABEL: test_unsigned_v5f16_v5i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl v1.4s, v0.4h -; CHECK-NEXT: fcvtl2 v0.4s, v0.8h -; CHECK-NEXT: fcvtzu v1.4s, v1.4s -; CHECK-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-NEXT: mov w1, v1.s[1] -; CHECK-NEXT: mov w2, v1.s[2] -; CHECK-NEXT: mov w3, v1.s[3] -; CHECK-NEXT: fmov w0, s1 -; CHECK-NEXT: fmov w4, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v5f16_v5i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtl v1.4s, v0.4h +; CHECK-SD-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-SD-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-SD-NEXT: mov w1, v1.s[1] +; CHECK-SD-NEXT: mov w2, v1.s[2] +; CHECK-SD-NEXT: mov w3, v1.s[3] +; CHECK-SD-NEXT: fmov w0, s1 +; CHECK-SD-NEXT: fmov w4, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v5f16_v5i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-GI-NEXT: mov v0.h[0], v0.h[4] +; CHECK-GI-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-GI-NEXT: mov s2, v1.s[1] +; CHECK-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-GI-NEXT: mov s3, v1.s[2] +; CHECK-GI-NEXT: mov s4, v1.s[3] +; CHECK-GI-NEXT: fmov w0, s1 +; CHECK-GI-NEXT: fmov w1, s2 +; CHECK-GI-NEXT: fmov w2, s3 +; CHECK-GI-NEXT: fmov w4, s0 +; CHECK-GI-NEXT: fmov w3, s4 +; CHECK-GI-NEXT: ret %x = call <5 x i32> @llvm.fptoui.sat.v5f16.v5i32(<5 x half> %f) ret <5 x i32> %x } define <6 x i32> @test_unsigned_v6f16_v6i32(<6 x half> 
%f) { -; CHECK-LABEL: test_unsigned_v6f16_v6i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl v1.4s, v0.4h -; CHECK-NEXT: fcvtl2 v0.4s, v0.8h -; CHECK-NEXT: fcvtzu v1.4s, v1.4s -; CHECK-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-NEXT: mov w1, v1.s[1] -; CHECK-NEXT: mov w2, v1.s[2] -; CHECK-NEXT: mov w5, v0.s[1] -; CHECK-NEXT: mov w3, v1.s[3] -; CHECK-NEXT: fmov w4, s0 -; CHECK-NEXT: fmov w0, s1 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v6f16_v6i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtl v1.4s, v0.4h +; CHECK-SD-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-SD-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-SD-NEXT: mov w1, v1.s[1] +; CHECK-SD-NEXT: mov w2, v1.s[2] +; CHECK-SD-NEXT: mov w5, v0.s[1] +; CHECK-SD-NEXT: mov w3, v1.s[3] +; CHECK-SD-NEXT: fmov w4, s0 +; CHECK-SD-NEXT: fmov w0, s1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v6f16_v6i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov v1.h[0], v0.h[4] +; CHECK-GI-NEXT: mov v1.h[1], v0.h[5] +; CHECK-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-GI-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-GI-NEXT: mov s2, v0.s[1] +; CHECK-GI-NEXT: mov s3, v0.s[2] +; CHECK-GI-NEXT: mov s4, v0.s[3] +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: mov s5, v1.s[1] +; CHECK-GI-NEXT: fmov w1, s2 +; CHECK-GI-NEXT: fmov w2, s3 +; CHECK-GI-NEXT: fmov w3, s4 +; CHECK-GI-NEXT: fmov w4, s1 +; CHECK-GI-NEXT: fmov w5, s5 +; CHECK-GI-NEXT: ret %x = call <6 x i32> @llvm.fptoui.sat.v6f16.v6i32(<6 x half> %f) ret <6 x i32> %x } define <7 x i32> @test_unsigned_v7f16_v7i32(<7 x half> %f) { -; CHECK-LABEL: test_unsigned_v7f16_v7i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl v1.4s, v0.4h -; CHECK-NEXT: fcvtl2 v0.4s, v0.8h -; CHECK-NEXT: fcvtzu v1.4s, v1.4s -; CHECK-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-NEXT: mov w1, v1.s[1] -; CHECK-NEXT: mov w2, v1.s[2] -; CHECK-NEXT: mov w3, v1.s[3] -; CHECK-NEXT: mov w5, v0.s[1] -; CHECK-NEXT: mov w6, v0.s[2] -; CHECK-NEXT: fmov w0, s1 -; CHECK-NEXT: fmov w4, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v7f16_v7i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtl v1.4s, v0.4h +; CHECK-SD-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-SD-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-SD-NEXT: mov w1, v1.s[1] +; CHECK-SD-NEXT: mov w2, v1.s[2] +; CHECK-SD-NEXT: mov w3, v1.s[3] +; CHECK-SD-NEXT: mov w5, v0.s[1] +; CHECK-SD-NEXT: mov w6, v0.s[2] +; CHECK-SD-NEXT: fmov w0, s1 +; CHECK-SD-NEXT: fmov w4, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v7f16_v7i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov v1.h[0], v0.h[4] +; CHECK-GI-NEXT: mov v1.h[1], v0.h[5] +; CHECK-GI-NEXT: mov v1.h[2], v0.h[6] +; CHECK-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-GI-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-GI-NEXT: mov s2, v0.s[1] +; CHECK-GI-NEXT: mov s3, v0.s[2] +; CHECK-GI-NEXT: mov s4, v0.s[3] +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: mov s5, v1.s[1] +; CHECK-GI-NEXT: mov s6, v1.s[2] +; CHECK-GI-NEXT: fmov w1, s2 +; CHECK-GI-NEXT: fmov w2, s3 +; CHECK-GI-NEXT: fmov w3, s4 +; CHECK-GI-NEXT: fmov w4, s1 +; CHECK-GI-NEXT: fmov w5, s5 +; CHECK-GI-NEXT: fmov w6, s6 +; CHECK-GI-NEXT: ret %x = call <7 x i32> @llvm.fptoui.sat.v7f16.v7i32(<7 x half> %f) ret <7 x i32> %x } define <8 x i32> @test_unsigned_v8f16_v8i32(<8 x half> %f) { -; CHECK-LABEL: test_unsigned_v8f16_v8i32: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-NEXT: fcvtl v0.4s, v0.4h -; 
CHECK-NEXT: fcvtzu v1.4s, v1.4s -; CHECK-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v8f16_v8i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-SD-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v8f16_v8i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-GI-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-GI-NEXT: fcvtzu v0.4s, v1.4s +; CHECK-GI-NEXT: fcvtzu v1.4s, v2.4s +; CHECK-GI-NEXT: ret %x = call <8 x i32> @llvm.fptoui.sat.v8f16.v8i32(<8 x half> %f) ret <8 x i32> %x } @@ -686,20 +1341,30 @@ define <2 x i32> @test_unsigned_v2f32_v2i32_duplicate(<2 x float> %f) { } define <2 x i50> @test_unsigned_v2f32_v2i50(<2 x float> %f) { -; CHECK-LABEL: test_unsigned_v2f32_v2i50: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: mov s1, v0.s[1] -; CHECK-NEXT: fcvtzu x9, s0 -; CHECK-NEXT: mov x10, #1125899906842623 // =0x3ffffffffffff -; CHECK-NEXT: fcvtzu x8, s1 -; CHECK-NEXT: cmp x8, x10 -; CHECK-NEXT: csel x8, x8, x10, lo -; CHECK-NEXT: cmp x9, x10 -; CHECK-NEXT: csel x9, x9, x10, lo -; CHECK-NEXT: fmov d0, x9 -; CHECK-NEXT: mov v0.d[1], x8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v2f32_v2i50: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-NEXT: mov s1, v0.s[1] +; CHECK-SD-NEXT: fcvtzu x9, s0 +; CHECK-SD-NEXT: mov x10, #1125899906842623 // =0x3ffffffffffff +; CHECK-SD-NEXT: fcvtzu x8, s1 +; CHECK-SD-NEXT: cmp x8, x10 +; CHECK-SD-NEXT: csel x8, x8, x10, lo +; CHECK-SD-NEXT: cmp x9, x10 +; CHECK-SD-NEXT: csel x9, x9, x10, lo +; CHECK-SD-NEXT: fmov d0, x9 +; CHECK-SD-NEXT: mov v0.d[1], x8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v2f32_v2i50: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtl v0.2d, v0.2s +; CHECK-GI-NEXT: adrp x8, .LCPI32_0 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI32_0] +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: cmhi v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: ret %x = call <2 x i50> @llvm.fptoui.sat.v2f32.v2i50(<2 x float> %f) ret <2 x i50> %x } @@ -715,95 +1380,181 @@ define <2 x i64> @test_unsigned_v2f32_v2i64(<2 x float> %f) { } define <2 x i100> @test_unsigned_v2f32_v2i100(<2 x float> %f) { -; CHECK-LABEL: test_unsigned_v2f32_v2i100: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #64 -; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: stp x30, x21, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w30, -32 -; CHECK-NEXT: .cfi_offset b8, -40 -; CHECK-NEXT: .cfi_offset b9, -48 -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: bl __fixunssfti -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w8, #1904214015 // =0x717fffff -; CHECK-NEXT: mov x21, #68719476735 // =0xfffffffff -; CHECK-NEXT: fmov s9, w8 -; CHECK-NEXT: mov s8, v0.s[1] -; CHECK-NEXT: fcmp s0, #0.0 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, xzr, x1, lt -; CHECK-NEXT: fcmp s0, s9 -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: csel x19, x21, x9, gt -; CHECK-NEXT: csinv x20, x8, xzr, le -; CHECK-NEXT: bl 
__fixunssfti -; CHECK-NEXT: fcmp s8, #0.0 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, xzr, x1, lt -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: mov x0, x20 -; CHECK-NEXT: mov x1, x19 -; CHECK-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: csel x3, x21, x9, gt -; CHECK-NEXT: ldp x30, x21, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: csinv x2, x8, xzr, le -; CHECK-NEXT: add sp, sp, #64 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v2f32_v2i100: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #64 +; CHECK-SD-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x30, x21, [sp, #32] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 64 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w30, -32 +; CHECK-SD-NEXT: .cfi_offset b8, -40 +; CHECK-SD-NEXT: .cfi_offset b9, -48 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-SD-NEXT: bl __fixunssfti +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w8, #1904214015 // =0x717fffff +; CHECK-SD-NEXT: mov x21, #68719476735 // =0xfffffffff +; CHECK-SD-NEXT: fmov s9, w8 +; CHECK-SD-NEXT: mov s8, v0.s[1] +; CHECK-SD-NEXT: fcmp s0, #0.0 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, xzr, x1, lt +; CHECK-SD-NEXT: fcmp s0, s9 +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: csel x19, x21, x9, gt +; CHECK-SD-NEXT: csinv x20, x8, xzr, le +; CHECK-SD-NEXT: bl __fixunssfti +; CHECK-SD-NEXT: fcmp s8, #0.0 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, xzr, x1, lt +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: mov x0, x20 +; CHECK-SD-NEXT: mov x1, x19 +; CHECK-SD-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x3, x21, x9, gt +; CHECK-SD-NEXT: ldp x30, x21, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: csinv x2, x8, xzr, le +; CHECK-SD-NEXT: add sp, sp, #64 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v2f32_v2i100: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #64 +; CHECK-GI-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x30, x21, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w30, -32 +; CHECK-GI-NEXT: .cfi_offset b8, -40 +; CHECK-GI-NEXT: .cfi_offset b9, -48 +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: mov s8, v0.s[1] +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-GI-NEXT: bl __fixunssfti +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w8, #1904214015 // =0x717fffff +; CHECK-GI-NEXT: mov x21, #68719476735 // =0xfffffffff +; CHECK-GI-NEXT: fmov s9, w8 +; CHECK-GI-NEXT: fcmp s0, #0.0 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, xzr, x1, lt +; CHECK-GI-NEXT: fcmp s0, s9 +; CHECK-GI-NEXT: fmov s0, s8 +; CHECK-GI-NEXT: csinv x19, x8, xzr, le +; 
CHECK-GI-NEXT: csel x20, x21, x9, gt +; CHECK-GI-NEXT: bl __fixunssfti +; CHECK-GI-NEXT: fcmp s8, #0.0 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, xzr, x1, lt +; CHECK-GI-NEXT: fcmp s8, s9 +; CHECK-GI-NEXT: mov x0, x19 +; CHECK-GI-NEXT: mov x1, x20 +; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x3, x21, x9, gt +; CHECK-GI-NEXT: ldp x30, x21, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: csinv x2, x8, xzr, le +; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: ret %x = call <2 x i100> @llvm.fptoui.sat.v2f32.v2i100(<2 x float> %f) ret <2 x i100> %x } define <2 x i128> @test_unsigned_v2f32_v2i128(<2 x float> %f) { -; CHECK-LABEL: test_unsigned_v2f32_v2i128: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #64 -; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w30, -32 -; CHECK-NEXT: .cfi_offset b8, -40 -; CHECK-NEXT: .cfi_offset b9, -48 -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: bl __fixunssfti -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w8, #2139095039 // =0x7f7fffff -; CHECK-NEXT: fmov s9, w8 -; CHECK-NEXT: mov s8, v0.s[1] -; CHECK-NEXT: fcmp s0, #0.0 -; CHECK-NEXT: csel x8, xzr, x1, lt -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: fcmp s0, s9 -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: csinv x19, x9, xzr, le -; CHECK-NEXT: csinv x20, x8, xzr, le -; CHECK-NEXT: bl __fixunssfti -; CHECK-NEXT: fcmp s8, #0.0 -; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload -; CHECK-NEXT: csel x8, xzr, x1, lt -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: mov x0, x19 -; CHECK-NEXT: mov x1, x20 -; CHECK-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: csinv x2, x9, xzr, le -; CHECK-NEXT: csinv x3, x8, xzr, le -; CHECK-NEXT: add sp, sp, #64 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v2f32_v2i128: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #64 +; CHECK-SD-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: str x30, [sp, #32] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 64 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w30, -32 +; CHECK-SD-NEXT: .cfi_offset b8, -40 +; CHECK-SD-NEXT: .cfi_offset b9, -48 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-SD-NEXT: bl __fixunssfti +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w8, #2139095039 // =0x7f7fffff +; CHECK-SD-NEXT: fmov s9, w8 +; CHECK-SD-NEXT: mov s8, v0.s[1] +; CHECK-SD-NEXT: fcmp s0, #0.0 +; CHECK-SD-NEXT: csel x8, xzr, x1, lt +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: fcmp s0, s9 +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: csinv x19, x9, xzr, le +; CHECK-SD-NEXT: csinv x20, x8, xzr, le +; CHECK-SD-NEXT: bl __fixunssfti +; CHECK-SD-NEXT: 
fcmp s8, #0.0 +; CHECK-SD-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload +; CHECK-SD-NEXT: csel x8, xzr, x1, lt +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: mov x0, x19 +; CHECK-SD-NEXT: mov x1, x20 +; CHECK-SD-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: csinv x2, x9, xzr, le +; CHECK-SD-NEXT: csinv x3, x8, xzr, le +; CHECK-SD-NEXT: add sp, sp, #64 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v2f32_v2i128: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #64 +; CHECK-GI-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #32] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w30, -32 +; CHECK-GI-NEXT: .cfi_offset b8, -40 +; CHECK-GI-NEXT: .cfi_offset b9, -48 +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: mov s8, v0.s[1] +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-GI-NEXT: bl __fixunssfti +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w8, #2139095039 // =0x7f7fffff +; CHECK-GI-NEXT: fmov s9, w8 +; CHECK-GI-NEXT: fcmp s0, #0.0 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, xzr, x1, lt +; CHECK-GI-NEXT: fcmp s0, s9 +; CHECK-GI-NEXT: fmov s0, s8 +; CHECK-GI-NEXT: csinv x19, x8, xzr, le +; CHECK-GI-NEXT: csinv x20, x9, xzr, le +; CHECK-GI-NEXT: bl __fixunssfti +; CHECK-GI-NEXT: fcmp s8, #0.0 +; CHECK-GI-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, xzr, x1, lt +; CHECK-GI-NEXT: fcmp s8, s9 +; CHECK-GI-NEXT: mov x0, x19 +; CHECK-GI-NEXT: mov x1, x20 +; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: csinv x2, x8, xzr, le +; CHECK-GI-NEXT: csinv x3, x9, xzr, le +; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: ret %x = call <2 x i128> @llvm.fptoui.sat.v2f32.v2i128(<2 x float> %f) ret <2 x i128> %x } @@ -859,11 +1610,19 @@ define <4 x i13> @test_unsigned_v4f32_v4i13(<4 x float> %f) { } define <4 x i16> @test_unsigned_v4f32_v4i16(<4 x float> %f) { -; CHECK-LABEL: test_unsigned_v4f32_v4i16: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-NEXT: uqxtn v0.4h, v0.4s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v4f32_v4i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-SD-NEXT: uqxtn v0.4h, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v4f32_v4i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v1.2d, #0x00ffff0000ffff +; CHECK-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-GI-NEXT: umin v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-GI-NEXT: ret %x = call <4 x i16> @llvm.fptoui.sat.v4f32.v4i16(<4 x float> %f) ret <4 x i16> %x } @@ -913,171 +1672,331 @@ define <4 x i50> @test_unsigned_v4f32_v4i50(<4 x float> %f) { } define <4 x i64> @test_unsigned_v4f32_v4i64(<4 x float> %f) { -; CHECK-LABEL: test_unsigned_v4f32_v4i64: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl2 v1.2d, v0.4s -; CHECK-NEXT: fcvtl v0.2d, v0.2s -; CHECK-NEXT: fcvtzu v1.2d, v1.2d -; CHECK-NEXT: fcvtzu v0.2d, v0.2d -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v4f32_v4i64: +; CHECK-SD: 
// %bb.0: +; CHECK-SD-NEXT: fcvtl2 v1.2d, v0.4s +; CHECK-SD-NEXT: fcvtl v0.2d, v0.2s +; CHECK-SD-NEXT: fcvtzu v1.2d, v1.2d +; CHECK-SD-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v4f32_v4i64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtl v1.2d, v0.2s +; CHECK-GI-NEXT: fcvtl2 v2.2d, v0.4s +; CHECK-GI-NEXT: fcvtzu v0.2d, v1.2d +; CHECK-GI-NEXT: fcvtzu v1.2d, v2.2d +; CHECK-GI-NEXT: ret %x = call <4 x i64> @llvm.fptoui.sat.v4f32.v4i64(<4 x float> %f) ret <4 x i64> %x } define <4 x i100> @test_unsigned_v4f32_v4i100(<4 x float> %f) { -; CHECK-LABEL: test_unsigned_v4f32_v4i100: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #96 -; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: stp x30, x25, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: stp x24, x23, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 96 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w23, -40 -; CHECK-NEXT: .cfi_offset w24, -48 -; CHECK-NEXT: .cfi_offset w25, -56 -; CHECK-NEXT: .cfi_offset w30, -64 -; CHECK-NEXT: .cfi_offset b8, -72 -; CHECK-NEXT: .cfi_offset b9, -80 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: bl __fixunssfti -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w8, #1904214015 // =0x717fffff -; CHECK-NEXT: mov x25, #68719476735 // =0xfffffffff -; CHECK-NEXT: fmov s9, w8 -; CHECK-NEXT: mov s8, v0.s[1] -; CHECK-NEXT: fcmp s0, #0.0 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, xzr, x1, lt -; CHECK-NEXT: fcmp s0, s9 -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: csel x19, x25, x9, gt -; CHECK-NEXT: csinv x20, x8, xzr, le -; CHECK-NEXT: bl __fixunssfti -; CHECK-NEXT: fcmp s8, #0.0 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, xzr, x1, lt -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: csel x21, x25, x9, gt -; CHECK-NEXT: csinv x22, x8, xzr, le -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: bl __fixunssfti -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov s8, v0.s[1] -; CHECK-NEXT: fcmp s0, #0.0 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, xzr, x1, lt -; CHECK-NEXT: fcmp s0, s9 -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: csel x23, x25, x9, gt -; CHECK-NEXT: csinv x24, x8, xzr, le -; CHECK-NEXT: bl __fixunssfti -; CHECK-NEXT: fcmp s8, #0.0 -; CHECK-NEXT: mov x2, x22 -; CHECK-NEXT: mov x3, x21 -; CHECK-NEXT: mov x4, x24 -; CHECK-NEXT: mov x5, x23 -; CHECK-NEXT: ldp x22, x21, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, xzr, x1, lt -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: mov x0, x20 -; CHECK-NEXT: mov x1, x19 -; CHECK-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: csel x7, x25, x9, gt -; CHECK-NEXT: ldp x24, x23, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldp x30, x25, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: csinv x6, x8, xzr, le -; CHECK-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: add sp, sp, #96 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v4f32_v4i100: 
+; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #96 +; CHECK-SD-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x30, x25, [sp, #32] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x24, x23, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x22, x21, [sp, #64] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 96 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w22, -32 +; CHECK-SD-NEXT: .cfi_offset w23, -40 +; CHECK-SD-NEXT: .cfi_offset w24, -48 +; CHECK-SD-NEXT: .cfi_offset w25, -56 +; CHECK-SD-NEXT: .cfi_offset w30, -64 +; CHECK-SD-NEXT: .cfi_offset b8, -72 +; CHECK-SD-NEXT: .cfi_offset b9, -80 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-SD-NEXT: bl __fixunssfti +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w8, #1904214015 // =0x717fffff +; CHECK-SD-NEXT: mov x25, #68719476735 // =0xfffffffff +; CHECK-SD-NEXT: fmov s9, w8 +; CHECK-SD-NEXT: mov s8, v0.s[1] +; CHECK-SD-NEXT: fcmp s0, #0.0 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, xzr, x1, lt +; CHECK-SD-NEXT: fcmp s0, s9 +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: csel x19, x25, x9, gt +; CHECK-SD-NEXT: csinv x20, x8, xzr, le +; CHECK-SD-NEXT: bl __fixunssfti +; CHECK-SD-NEXT: fcmp s8, #0.0 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, xzr, x1, lt +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: csel x21, x25, x9, gt +; CHECK-SD-NEXT: csinv x22, x8, xzr, le +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-SD-NEXT: bl __fixunssfti +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov s8, v0.s[1] +; CHECK-SD-NEXT: fcmp s0, #0.0 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, xzr, x1, lt +; CHECK-SD-NEXT: fcmp s0, s9 +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: csel x23, x25, x9, gt +; CHECK-SD-NEXT: csinv x24, x8, xzr, le +; CHECK-SD-NEXT: bl __fixunssfti +; CHECK-SD-NEXT: fcmp s8, #0.0 +; CHECK-SD-NEXT: mov x2, x22 +; CHECK-SD-NEXT: mov x3, x21 +; CHECK-SD-NEXT: mov x4, x24 +; CHECK-SD-NEXT: mov x5, x23 +; CHECK-SD-NEXT: ldp x22, x21, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, xzr, x1, lt +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: mov x0, x20 +; CHECK-SD-NEXT: mov x1, x19 +; CHECK-SD-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x7, x25, x9, gt +; CHECK-SD-NEXT: ldp x24, x23, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x30, x25, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: csinv x6, x8, xzr, le +; CHECK-SD-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: add sp, sp, #96 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v4f32_v4i100: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #112 +; CHECK-GI-NEXT: stp d11, d10, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x30, x25, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill +; 
CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 112 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w23, -40 +; CHECK-GI-NEXT: .cfi_offset w24, -48 +; CHECK-GI-NEXT: .cfi_offset w25, -56 +; CHECK-GI-NEXT: .cfi_offset w30, -64 +; CHECK-GI-NEXT: .cfi_offset b8, -72 +; CHECK-GI-NEXT: .cfi_offset b9, -80 +; CHECK-GI-NEXT: .cfi_offset b10, -88 +; CHECK-GI-NEXT: .cfi_offset b11, -96 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: mov s8, v0.s[1] +; CHECK-GI-NEXT: mov s9, v0.s[2] +; CHECK-GI-NEXT: mov s10, v0.s[3] +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-GI-NEXT: bl __fixunssfti +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w8, #1904214015 // =0x717fffff +; CHECK-GI-NEXT: mov x25, #68719476735 // =0xfffffffff +; CHECK-GI-NEXT: fmov s11, w8 +; CHECK-GI-NEXT: fcmp s0, #0.0 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, xzr, x1, lt +; CHECK-GI-NEXT: fcmp s0, s11 +; CHECK-GI-NEXT: fmov s0, s8 +; CHECK-GI-NEXT: csinv x19, x8, xzr, le +; CHECK-GI-NEXT: csel x20, x25, x9, gt +; CHECK-GI-NEXT: bl __fixunssfti +; CHECK-GI-NEXT: fcmp s8, #0.0 +; CHECK-GI-NEXT: fmov s0, s9 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, xzr, x1, lt +; CHECK-GI-NEXT: fcmp s8, s11 +; CHECK-GI-NEXT: csinv x21, x8, xzr, le +; CHECK-GI-NEXT: csel x22, x25, x9, gt +; CHECK-GI-NEXT: bl __fixunssfti +; CHECK-GI-NEXT: fcmp s9, #0.0 +; CHECK-GI-NEXT: fmov s0, s10 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, xzr, x1, lt +; CHECK-GI-NEXT: fcmp s9, s11 +; CHECK-GI-NEXT: csinv x23, x8, xzr, le +; CHECK-GI-NEXT: csel x24, x25, x9, gt +; CHECK-GI-NEXT: bl __fixunssfti +; CHECK-GI-NEXT: fcmp s10, #0.0 +; CHECK-GI-NEXT: mov x2, x21 +; CHECK-GI-NEXT: mov x3, x22 +; CHECK-GI-NEXT: mov x4, x23 +; CHECK-GI-NEXT: mov x5, x24 +; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, xzr, x1, lt +; CHECK-GI-NEXT: fcmp s10, s11 +; CHECK-GI-NEXT: mov x0, x19 +; CHECK-GI-NEXT: mov x1, x20 +; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x7, x25, x9, gt +; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x30, x25, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: csinv x6, x8, xzr, le +; CHECK-GI-NEXT: ldp d9, d8, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d11, d10, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: add sp, sp, #112 +; CHECK-GI-NEXT: ret %x = call <4 x i100> @llvm.fptoui.sat.v4f32.v4i100(<4 x float> %f) ret <4 x i100> %x } define <4 x i128> @test_unsigned_v4f32_v4i128(<4 x float> %f) { -; CHECK-LABEL: test_unsigned_v4f32_v4i128: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #96 -; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill -; CHECK-NEXT: stp x24, x23, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: stp x22, x21, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 96 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w22, -32 -; CHECK-NEXT: .cfi_offset w23, -40 -; CHECK-NEXT: .cfi_offset w24, -48 -; CHECK-NEXT: 
.cfi_offset w30, -64 -; CHECK-NEXT: .cfi_offset b8, -72 -; CHECK-NEXT: .cfi_offset b9, -80 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: bl __fixunssfti -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov w8, #2139095039 // =0x7f7fffff -; CHECK-NEXT: fmov s9, w8 -; CHECK-NEXT: mov s8, v0.s[1] -; CHECK-NEXT: fcmp s0, #0.0 -; CHECK-NEXT: csel x8, xzr, x1, lt -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: fcmp s0, s9 -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: csinv x19, x9, xzr, le -; CHECK-NEXT: csinv x20, x8, xzr, le -; CHECK-NEXT: bl __fixunssfti -; CHECK-NEXT: fcmp s8, #0.0 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: csel x8, xzr, x1, lt -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: csinv x21, x9, xzr, le -; CHECK-NEXT: csinv x22, x8, xzr, le -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: bl __fixunssfti -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov s8, v0.s[1] -; CHECK-NEXT: fcmp s0, #0.0 -; CHECK-NEXT: csel x8, xzr, x1, lt -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: fcmp s0, s9 -; CHECK-NEXT: fmov s0, s8 -; CHECK-NEXT: csinv x23, x9, xzr, le -; CHECK-NEXT: csinv x24, x8, xzr, le -; CHECK-NEXT: bl __fixunssfti -; CHECK-NEXT: fcmp s8, #0.0 -; CHECK-NEXT: mov x2, x21 -; CHECK-NEXT: mov x3, x22 -; CHECK-NEXT: mov x4, x23 -; CHECK-NEXT: mov x5, x24 -; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload -; CHECK-NEXT: ldp x22, x21, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: csel x8, xzr, x1, lt -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: fcmp s8, s9 -; CHECK-NEXT: mov x0, x19 -; CHECK-NEXT: mov x1, x20 -; CHECK-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload -; CHECK-NEXT: ldp x24, x23, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: csinv x6, x9, xzr, le -; CHECK-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: csinv x7, x8, xzr, le -; CHECK-NEXT: add sp, sp, #96 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v4f32_v4i128: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #96 +; CHECK-SD-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: str x30, [sp, #32] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x24, x23, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x22, x21, [sp, #64] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 96 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w22, -32 +; CHECK-SD-NEXT: .cfi_offset w23, -40 +; CHECK-SD-NEXT: .cfi_offset w24, -48 +; CHECK-SD-NEXT: .cfi_offset w30, -64 +; CHECK-SD-NEXT: .cfi_offset b8, -72 +; CHECK-SD-NEXT: .cfi_offset b9, -80 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-SD-NEXT: bl __fixunssfti +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov w8, #2139095039 // =0x7f7fffff +; CHECK-SD-NEXT: fmov s9, w8 +; CHECK-SD-NEXT: mov s8, v0.s[1] +; CHECK-SD-NEXT: fcmp s0, #0.0 +; CHECK-SD-NEXT: csel x8, xzr, x1, lt +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: fcmp s0, s9 +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: csinv x19, x9, xzr, le +; CHECK-SD-NEXT: csinv x20, x8, xzr, le +; 
CHECK-SD-NEXT: bl __fixunssfti +; CHECK-SD-NEXT: fcmp s8, #0.0 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: csel x8, xzr, x1, lt +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: csinv x21, x9, xzr, le +; CHECK-SD-NEXT: csinv x22, x8, xzr, le +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-SD-NEXT: bl __fixunssfti +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov s8, v0.s[1] +; CHECK-SD-NEXT: fcmp s0, #0.0 +; CHECK-SD-NEXT: csel x8, xzr, x1, lt +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: fcmp s0, s9 +; CHECK-SD-NEXT: fmov s0, s8 +; CHECK-SD-NEXT: csinv x23, x9, xzr, le +; CHECK-SD-NEXT: csinv x24, x8, xzr, le +; CHECK-SD-NEXT: bl __fixunssfti +; CHECK-SD-NEXT: fcmp s8, #0.0 +; CHECK-SD-NEXT: mov x2, x21 +; CHECK-SD-NEXT: mov x3, x22 +; CHECK-SD-NEXT: mov x4, x23 +; CHECK-SD-NEXT: mov x5, x24 +; CHECK-SD-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload +; CHECK-SD-NEXT: ldp x22, x21, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x8, xzr, x1, lt +; CHECK-SD-NEXT: csel x9, xzr, x0, lt +; CHECK-SD-NEXT: fcmp s8, s9 +; CHECK-SD-NEXT: mov x0, x19 +; CHECK-SD-NEXT: mov x1, x20 +; CHECK-SD-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x24, x23, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: csinv x6, x9, xzr, le +; CHECK-SD-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: csinv x7, x8, xzr, le +; CHECK-SD-NEXT: add sp, sp, #96 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v4f32_v4i128: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #112 +; CHECK-GI-NEXT: stp d11, d10, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #48] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 112 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w23, -40 +; CHECK-GI-NEXT: .cfi_offset w24, -48 +; CHECK-GI-NEXT: .cfi_offset w30, -64 +; CHECK-GI-NEXT: .cfi_offset b8, -72 +; CHECK-GI-NEXT: .cfi_offset b9, -80 +; CHECK-GI-NEXT: .cfi_offset b10, -88 +; CHECK-GI-NEXT: .cfi_offset b11, -96 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: mov s8, v0.s[1] +; CHECK-GI-NEXT: mov s9, v0.s[2] +; CHECK-GI-NEXT: mov s10, v0.s[3] +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-GI-NEXT: bl __fixunssfti +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov w8, #2139095039 // =0x7f7fffff +; CHECK-GI-NEXT: fmov s11, w8 +; CHECK-GI-NEXT: fcmp s0, #0.0 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, xzr, x1, lt +; CHECK-GI-NEXT: fcmp s0, s11 +; CHECK-GI-NEXT: fmov s0, s8 +; CHECK-GI-NEXT: csinv x19, x8, xzr, le +; CHECK-GI-NEXT: csinv x20, x9, xzr, le +; CHECK-GI-NEXT: bl __fixunssfti +; CHECK-GI-NEXT: fcmp s8, #0.0 +; CHECK-GI-NEXT: fmov s0, s9 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, xzr, x1, lt +; CHECK-GI-NEXT: fcmp s8, s11 +; CHECK-GI-NEXT: csinv x21, x8, xzr, le +; CHECK-GI-NEXT: csinv x22, x9, xzr, 
le +; CHECK-GI-NEXT: bl __fixunssfti +; CHECK-GI-NEXT: fcmp s9, #0.0 +; CHECK-GI-NEXT: fmov s0, s10 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, xzr, x1, lt +; CHECK-GI-NEXT: fcmp s9, s11 +; CHECK-GI-NEXT: csinv x23, x8, xzr, le +; CHECK-GI-NEXT: csinv x24, x9, xzr, le +; CHECK-GI-NEXT: bl __fixunssfti +; CHECK-GI-NEXT: fcmp s10, #0.0 +; CHECK-GI-NEXT: mov x2, x21 +; CHECK-GI-NEXT: mov x3, x22 +; CHECK-GI-NEXT: mov x4, x23 +; CHECK-GI-NEXT: mov x5, x24 +; CHECK-GI-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload +; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, xzr, x1, lt +; CHECK-GI-NEXT: fcmp s10, s11 +; CHECK-GI-NEXT: mov x0, x19 +; CHECK-GI-NEXT: mov x1, x20 +; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: csinv x6, x8, xzr, le +; CHECK-GI-NEXT: ldp d9, d8, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: csinv x7, x9, xzr, le +; CHECK-GI-NEXT: ldp d11, d10, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: add sp, sp, #112 +; CHECK-GI-NEXT: ret %x = call <4 x i128> @llvm.fptoui.sat.v4f32.v4i128(<4 x float> %f) ret <4 x i128> %x } @@ -1097,127 +2016,193 @@ declare <2 x i100> @llvm.fptoui.sat.v2f64.v2i100(<2 x double>) declare <2 x i128> @llvm.fptoui.sat.v2f64.v2i128(<2 x double>) define <2 x i1> @test_unsigned_v2f64_v2i1(<2 x double> %f) { -; CHECK-LABEL: test_unsigned_v2f64_v2i1: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: fcvtzu w9, d0 -; CHECK-NEXT: fcvtzu w8, d1 -; CHECK-NEXT: cmp w8, #1 -; CHECK-NEXT: csinc w8, w8, wzr, lo -; CHECK-NEXT: cmp w9, #1 -; CHECK-NEXT: csinc w9, w9, wzr, lo -; CHECK-NEXT: fmov s0, w9 -; CHECK-NEXT: mov v0.s[1], w8 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v2f64_v2i1: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: fcvtzu w9, d0 +; CHECK-SD-NEXT: fcvtzu w8, d1 +; CHECK-SD-NEXT: cmp w8, #1 +; CHECK-SD-NEXT: csinc w8, w8, wzr, lo +; CHECK-SD-NEXT: cmp w9, #1 +; CHECK-SD-NEXT: csinc w9, w9, wzr, lo +; CHECK-SD-NEXT: fmov s0, w9 +; CHECK-SD-NEXT: mov v0.s[1], w8 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v2f64_v2i1: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: adrp x8, .LCPI46_0 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI46_0] +; CHECK-GI-NEXT: cmhi v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: ret %x = call <2 x i1> @llvm.fptoui.sat.v2f64.v2i1(<2 x double> %f) ret <2 x i1> %x } define <2 x i8> @test_unsigned_v2f64_v2i8(<2 x double> %f) { -; CHECK-LABEL: test_unsigned_v2f64_v2i8: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: fcvtzu w10, d0 -; CHECK-NEXT: mov w8, #255 // =0xff -; CHECK-NEXT: fcvtzu w9, d1 -; CHECK-NEXT: cmp w9, #255 -; CHECK-NEXT: csel w9, w9, w8, lo -; CHECK-NEXT: cmp w10, #255 -; CHECK-NEXT: csel w8, w10, w8, lo -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w9 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v2f64_v2i8: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: fcvtzu w10, d0 +; CHECK-SD-NEXT: mov w8, #255 // =0xff +; CHECK-SD-NEXT: fcvtzu w9, d1 +; CHECK-SD-NEXT: cmp w9, #255 +; CHECK-SD-NEXT: csel w9, w9, w8, lo +; 
CHECK-SD-NEXT: cmp w10, #255 +; CHECK-SD-NEXT: csel w8, w10, w8, lo +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w9 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v2f64_v2i8: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v1.2d, #0x000000000000ff +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: cmhi v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: ret %x = call <2 x i8> @llvm.fptoui.sat.v2f64.v2i8(<2 x double> %f) ret <2 x i8> %x } define <2 x i13> @test_unsigned_v2f64_v2i13(<2 x double> %f) { -; CHECK-LABEL: test_unsigned_v2f64_v2i13: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: fcvtzu w9, d0 -; CHECK-NEXT: mov w10, #8191 // =0x1fff -; CHECK-NEXT: fcvtzu w8, d1 -; CHECK-NEXT: cmp w8, w10 -; CHECK-NEXT: csel w8, w8, w10, lo -; CHECK-NEXT: cmp w9, w10 -; CHECK-NEXT: csel w9, w9, w10, lo -; CHECK-NEXT: fmov s0, w9 -; CHECK-NEXT: mov v0.s[1], w8 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v2f64_v2i13: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: fcvtzu w9, d0 +; CHECK-SD-NEXT: mov w10, #8191 // =0x1fff +; CHECK-SD-NEXT: fcvtzu w8, d1 +; CHECK-SD-NEXT: cmp w8, w10 +; CHECK-SD-NEXT: csel w8, w8, w10, lo +; CHECK-SD-NEXT: cmp w9, w10 +; CHECK-SD-NEXT: csel w9, w9, w10, lo +; CHECK-SD-NEXT: fmov s0, w9 +; CHECK-SD-NEXT: mov v0.s[1], w8 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v2f64_v2i13: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: adrp x8, .LCPI48_0 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI48_0] +; CHECK-GI-NEXT: cmhi v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: ret %x = call <2 x i13> @llvm.fptoui.sat.v2f64.v2i13(<2 x double> %f) ret <2 x i13> %x } define <2 x i16> @test_unsigned_v2f64_v2i16(<2 x double> %f) { -; CHECK-LABEL: test_unsigned_v2f64_v2i16: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: fcvtzu w9, d0 -; CHECK-NEXT: mov w10, #65535 // =0xffff -; CHECK-NEXT: fcvtzu w8, d1 -; CHECK-NEXT: cmp w8, w10 -; CHECK-NEXT: csel w8, w8, w10, lo -; CHECK-NEXT: cmp w9, w10 -; CHECK-NEXT: csel w9, w9, w10, lo -; CHECK-NEXT: fmov s0, w9 -; CHECK-NEXT: mov v0.s[1], w8 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v2f64_v2i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: fcvtzu w9, d0 +; CHECK-SD-NEXT: mov w10, #65535 // =0xffff +; CHECK-SD-NEXT: fcvtzu w8, d1 +; CHECK-SD-NEXT: cmp w8, w10 +; CHECK-SD-NEXT: csel w8, w8, w10, lo +; CHECK-SD-NEXT: cmp w9, w10 +; CHECK-SD-NEXT: csel w9, w9, w10, lo +; CHECK-SD-NEXT: fmov s0, w9 +; CHECK-SD-NEXT: mov v0.s[1], w8 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v2f64_v2i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v1.2d, #0x0000000000ffff +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: cmhi v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: ret %x = call <2 x i16> @llvm.fptoui.sat.v2f64.v2i16(<2 x double> %f) ret <2 x i16> %x } define <2 x i19> @test_unsigned_v2f64_v2i19(<2 x double> %f) { -; CHECK-LABEL: test_unsigned_v2f64_v2i19: -; CHECK: // %bb.0: -; 
CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: fcvtzu w9, d0 -; CHECK-NEXT: mov w10, #524287 // =0x7ffff -; CHECK-NEXT: fcvtzu w8, d1 -; CHECK-NEXT: cmp w8, w10 -; CHECK-NEXT: csel w8, w8, w10, lo -; CHECK-NEXT: cmp w9, w10 -; CHECK-NEXT: csel w9, w9, w10, lo -; CHECK-NEXT: fmov s0, w9 -; CHECK-NEXT: mov v0.s[1], w8 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v2f64_v2i19: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: fcvtzu w9, d0 +; CHECK-SD-NEXT: mov w10, #524287 // =0x7ffff +; CHECK-SD-NEXT: fcvtzu w8, d1 +; CHECK-SD-NEXT: cmp w8, w10 +; CHECK-SD-NEXT: csel w8, w8, w10, lo +; CHECK-SD-NEXT: cmp w9, w10 +; CHECK-SD-NEXT: csel w9, w9, w10, lo +; CHECK-SD-NEXT: fmov s0, w9 +; CHECK-SD-NEXT: mov v0.s[1], w8 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v2f64_v2i19: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: adrp x8, .LCPI50_0 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI50_0] +; CHECK-GI-NEXT: cmhi v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: ret %x = call <2 x i19> @llvm.fptoui.sat.v2f64.v2i19(<2 x double> %f) ret <2 x i19> %x } define <2 x i32> @test_unsigned_v2f64_v2i32_duplicate(<2 x double> %f) { -; CHECK-LABEL: test_unsigned_v2f64_v2i32_duplicate: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: fcvtzu w8, d0 -; CHECK-NEXT: fcvtzu w9, d1 -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: mov v0.s[1], w9 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v2f64_v2i32_duplicate: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: fcvtzu w8, d0 +; CHECK-SD-NEXT: fcvtzu w9, d1 +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: mov v0.s[1], w9 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v2f64_v2i32_duplicate: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v1.2d, #0x000000ffffffff +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: cmhi v2.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: ret %x = call <2 x i32> @llvm.fptoui.sat.v2f64.v2i32(<2 x double> %f) ret <2 x i32> %x } define <2 x i50> @test_unsigned_v2f64_v2i50(<2 x double> %f) { -; CHECK-LABEL: test_unsigned_v2f64_v2i50: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: fcvtzu x9, d0 -; CHECK-NEXT: mov x10, #1125899906842623 // =0x3ffffffffffff -; CHECK-NEXT: fcvtzu x8, d1 -; CHECK-NEXT: cmp x8, x10 -; CHECK-NEXT: csel x8, x8, x10, lo -; CHECK-NEXT: cmp x9, x10 -; CHECK-NEXT: csel x9, x9, x10, lo -; CHECK-NEXT: fmov d0, x9 -; CHECK-NEXT: mov v0.d[1], x8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v2f64_v2i50: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: fcvtzu x9, d0 +; CHECK-SD-NEXT: mov x10, #1125899906842623 // =0x3ffffffffffff +; CHECK-SD-NEXT: fcvtzu x8, d1 +; CHECK-SD-NEXT: cmp x8, x10 +; CHECK-SD-NEXT: csel x8, x8, x10, lo +; CHECK-SD-NEXT: cmp x9, x10 +; CHECK-SD-NEXT: csel x9, x9, x10, lo +; CHECK-SD-NEXT: fmov d0, x9 +; CHECK-SD-NEXT: mov v0.d[1], x8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v2f64_v2i50: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: adrp x8, .LCPI52_0 +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI52_0] +; CHECK-GI-NEXT: cmhi v2.2d, 
v1.2d, v0.2d +; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-GI-NEXT: ret %x = call <2 x i50> @llvm.fptoui.sat.v2f64.v2i50(<2 x double> %f) ret <2 x i50> %x } @@ -1232,93 +2217,177 @@ define <2 x i64> @test_unsigned_v2f64_v2i64(<2 x double> %f) { } define <2 x i100> @test_unsigned_v2f64_v2i100(<2 x double> %f) { -; CHECK-LABEL: test_unsigned_v2f64_v2i100: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #64 -; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: stp x30, x21, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w21, -24 -; CHECK-NEXT: .cfi_offset w30, -32 -; CHECK-NEXT: .cfi_offset b8, -40 -; CHECK-NEXT: .cfi_offset b9, -48 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: bl __fixunsdfti -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov x8, #5057542381537067007 // =0x462fffffffffffff -; CHECK-NEXT: mov x21, #68719476735 // =0xfffffffff -; CHECK-NEXT: fmov d9, x8 -; CHECK-NEXT: mov d8, v0.d[1] -; CHECK-NEXT: fcmp d0, #0.0 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, xzr, x1, lt -; CHECK-NEXT: fcmp d0, d9 -; CHECK-NEXT: fmov d0, d8 -; CHECK-NEXT: csel x19, x21, x9, gt -; CHECK-NEXT: csinv x20, x8, xzr, le -; CHECK-NEXT: bl __fixunsdfti -; CHECK-NEXT: fcmp d8, #0.0 -; CHECK-NEXT: csel x8, xzr, x0, lt -; CHECK-NEXT: csel x9, xzr, x1, lt -; CHECK-NEXT: fcmp d8, d9 -; CHECK-NEXT: mov x0, x20 -; CHECK-NEXT: mov x1, x19 -; CHECK-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: csel x3, x21, x9, gt -; CHECK-NEXT: ldp x30, x21, [sp, #32] // 16-byte Folded Reload -; CHECK-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: csinv x2, x8, xzr, le -; CHECK-NEXT: add sp, sp, #64 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v2f64_v2i100: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #64 +; CHECK-SD-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x30, x21, [sp, #32] // 16-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 64 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w21, -24 +; CHECK-SD-NEXT: .cfi_offset w30, -32 +; CHECK-SD-NEXT: .cfi_offset b8, -40 +; CHECK-SD-NEXT: .cfi_offset b9, -48 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: bl __fixunsdfti +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov x8, #5057542381537067007 // =0x462fffffffffffff +; CHECK-SD-NEXT: mov x21, #68719476735 // =0xfffffffff +; CHECK-SD-NEXT: fmov d9, x8 +; CHECK-SD-NEXT: mov d8, v0.d[1] +; CHECK-SD-NEXT: fcmp d0, #0.0 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, xzr, x1, lt +; CHECK-SD-NEXT: fcmp d0, d9 +; CHECK-SD-NEXT: fmov d0, d8 +; CHECK-SD-NEXT: csel x19, x21, x9, gt +; CHECK-SD-NEXT: csinv x20, x8, xzr, le +; CHECK-SD-NEXT: bl __fixunsdfti +; CHECK-SD-NEXT: fcmp d8, #0.0 +; CHECK-SD-NEXT: csel x8, xzr, x0, lt +; CHECK-SD-NEXT: csel x9, xzr, x1, lt +; CHECK-SD-NEXT: fcmp d8, d9 +; CHECK-SD-NEXT: mov x0, x20 +; CHECK-SD-NEXT: mov x1, x19 +; CHECK-SD-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: csel x3, x21, x9, gt +; CHECK-SD-NEXT: ldp 
x30, x21, [sp, #32] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: csinv x2, x8, xzr, le +; CHECK-SD-NEXT: add sp, sp, #64 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v2f64_v2i100: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #64 +; CHECK-GI-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x30, x21, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w30, -32 +; CHECK-GI-NEXT: .cfi_offset b8, -40 +; CHECK-GI-NEXT: .cfi_offset b9, -48 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: mov d8, v0.d[1] +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-NEXT: bl __fixunsdfti +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov x8, #5057542381537067007 // =0x462fffffffffffff +; CHECK-GI-NEXT: mov x21, #68719476735 // =0xfffffffff +; CHECK-GI-NEXT: fmov d9, x8 +; CHECK-GI-NEXT: fcmp d0, #0.0 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, xzr, x1, lt +; CHECK-GI-NEXT: fcmp d0, d9 +; CHECK-GI-NEXT: fmov d0, d8 +; CHECK-GI-NEXT: csinv x19, x8, xzr, le +; CHECK-GI-NEXT: csel x20, x21, x9, gt +; CHECK-GI-NEXT: bl __fixunsdfti +; CHECK-GI-NEXT: fcmp d8, #0.0 +; CHECK-GI-NEXT: csel x8, xzr, x0, lt +; CHECK-GI-NEXT: csel x9, xzr, x1, lt +; CHECK-GI-NEXT: fcmp d8, d9 +; CHECK-GI-NEXT: mov x0, x19 +; CHECK-GI-NEXT: mov x1, x20 +; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: csel x3, x21, x9, gt +; CHECK-GI-NEXT: ldp x30, x21, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: csinv x2, x8, xzr, le +; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: ret %x = call <2 x i100> @llvm.fptoui.sat.v2f64.v2i100(<2 x double> %f) ret <2 x i100> %x } define <2 x i128> @test_unsigned_v2f64_v2i128(<2 x double> %f) { -; CHECK-LABEL: test_unsigned_v2f64_v2i128: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #64 -; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w30, -32 -; CHECK-NEXT: .cfi_offset b8, -40 -; CHECK-NEXT: .cfi_offset b9, -48 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: bl __fixunsdfti -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: mov x8, #5183643171103440895 // =0x47efffffffffffff -; CHECK-NEXT: fmov d9, x8 -; CHECK-NEXT: mov d8, v0.d[1] -; CHECK-NEXT: fcmp d0, #0.0 -; CHECK-NEXT: csel x8, xzr, x1, lt -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: fcmp d0, d9 -; CHECK-NEXT: fmov d0, d8 -; CHECK-NEXT: csinv x19, x9, xzr, le -; CHECK-NEXT: csinv x20, x8, xzr, le -; CHECK-NEXT: bl __fixunsdfti -; CHECK-NEXT: fcmp d8, #0.0 -; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload -; CHECK-NEXT: csel x8, xzr, x1, lt -; CHECK-NEXT: csel x9, xzr, x0, lt -; CHECK-NEXT: fcmp d8, d9 -; CHECK-NEXT: mov x0, x19 -; CHECK-NEXT: mov x1, x20 -; CHECK-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: ldp d9, d8, [sp, #16] 
// 16-byte Folded Reload
-; CHECK-NEXT: csinv x2, x9, xzr, le
-; CHECK-NEXT: csinv x3, x8, xzr, le
-; CHECK-NEXT: add sp, sp, #64
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_unsigned_v2f64_v2i128:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #64
+; CHECK-SD-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-SD-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 64
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w20, -16
+; CHECK-SD-NEXT: .cfi_offset w30, -32
+; CHECK-SD-NEXT: .cfi_offset b8, -40
+; CHECK-SD-NEXT: .cfi_offset b9, -48
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: bl __fixunsdfti
+; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov x8, #5183643171103440895 // =0x47efffffffffffff
+; CHECK-SD-NEXT: fmov d9, x8
+; CHECK-SD-NEXT: mov d8, v0.d[1]
+; CHECK-SD-NEXT: fcmp d0, #0.0
+; CHECK-SD-NEXT: csel x8, xzr, x1, lt
+; CHECK-SD-NEXT: csel x9, xzr, x0, lt
+; CHECK-SD-NEXT: fcmp d0, d9
+; CHECK-SD-NEXT: fmov d0, d8
+; CHECK-SD-NEXT: csinv x19, x9, xzr, le
+; CHECK-SD-NEXT: csinv x20, x8, xzr, le
+; CHECK-SD-NEXT: bl __fixunsdfti
+; CHECK-SD-NEXT: fcmp d8, #0.0
+; CHECK-SD-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-SD-NEXT: csel x8, xzr, x1, lt
+; CHECK-SD-NEXT: csel x9, xzr, x0, lt
+; CHECK-SD-NEXT: fcmp d8, d9
+; CHECK-SD-NEXT: mov x0, x19
+; CHECK-SD-NEXT: mov x1, x20
+; CHECK-SD-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: csinv x2, x9, xzr, le
+; CHECK-SD-NEXT: csinv x3, x8, xzr, le
+; CHECK-SD-NEXT: add sp, sp, #64
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_unsigned_v2f64_v2i128:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub sp, sp, #64
+; CHECK-GI-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w30, -32
+; CHECK-GI-NEXT: .cfi_offset b8, -40
+; CHECK-GI-NEXT: .cfi_offset b9, -48
+; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NEXT: mov d8, v0.d[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: bl __fixunsdfti
+; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov x8, #5183643171103440895 // =0x47efffffffffffff
+; CHECK-GI-NEXT: fmov d9, x8
+; CHECK-GI-NEXT: fcmp d0, #0.0
+; CHECK-GI-NEXT: csel x8, xzr, x0, lt
+; CHECK-GI-NEXT: csel x9, xzr, x1, lt
+; CHECK-GI-NEXT: fcmp d0, d9
+; CHECK-GI-NEXT: fmov d0, d8
+; CHECK-GI-NEXT: csinv x19, x8, xzr, le
+; CHECK-GI-NEXT: csinv x20, x9, xzr, le
+; CHECK-GI-NEXT: bl __fixunsdfti
+; CHECK-GI-NEXT: fcmp d8, #0.0
+; CHECK-GI-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-GI-NEXT: csel x8, xzr, x0, lt
+; CHECK-GI-NEXT: csel x9, xzr, x1, lt
+; CHECK-GI-NEXT: fcmp d8, d9
+; CHECK-GI-NEXT: mov x0, x19
+; CHECK-GI-NEXT: mov x1, x20
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: csinv x2, x8, xzr, le
+; CHECK-GI-NEXT: csinv x3, x9, xzr, le
+; CHECK-GI-NEXT: add sp, sp, #64
+; CHECK-GI-NEXT: ret
%x = call <2 x i128> @llvm.fptoui.sat.v2f64.v2i128(<2 x double> %f)
ret <2 x i128> %x
}
@@ -1338,77 +2407,139 @@ declare <4 x i100> @llvm.fptoui.sat.v4f16.v4i100(<4 x half>)
declare <4 x i128> @llvm.fptoui.sat.v4f16.v4i128(<4 x half>)

define <4 x i1> @test_unsigned_v4f16_v4i1(<4 x half> %f) {
-; CHECK-CVT-LABEL: test_unsigned_v4f16_v4i1:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: movi v1.4s, #1
-; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_v4f16_v4i1:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: movi v1.4h, #1
-; CHECK-FP16-NEXT: fcvtzu v0.4h, v0.4h
-; CHECK-FP16-NEXT: umin v0.4h, v0.4h, v1.4h
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_v4f16_v4i1:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-SD-CVT-NEXT: movi v1.4s, #1
+; CHECK-SD-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-SD-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
+; CHECK-SD-CVT-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_v4f16_v4i1:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: movi v1.4h, #1
+; CHECK-SD-FP16-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-SD-FP16-NEXT: umin v0.4h, v0.4h, v1.4h
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_v4f16_v4i1:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-GI-CVT-NEXT: movi v1.4s, #1
+; CHECK-GI-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-GI-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
+; CHECK-GI-CVT-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_v4f16_v4i1:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: movi v1.4h, #1
+; CHECK-GI-FP16-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: umin v0.4h, v0.4h, v1.4h
+; CHECK-GI-FP16-NEXT: ret
%x = call <4 x i1> @llvm.fptoui.sat.v4f16.v4i1(<4 x half> %f)
ret <4 x i1> %x
}

define <4 x i8> @test_unsigned_v4f16_v4i8(<4 x half> %f) {
-; CHECK-CVT-LABEL: test_unsigned_v4f16_v4i8:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: movi v1.2d, #0x0000ff000000ff
-; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_v4f16_v4i8:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: movi d1, #0xff00ff00ff00ff
-; CHECK-FP16-NEXT: fcvtzu v0.4h, v0.4h
-; CHECK-FP16-NEXT: umin v0.4h, v0.4h, v1.4h
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_v4f16_v4i8:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-SD-CVT-NEXT: movi v1.2d, #0x0000ff000000ff
+; CHECK-SD-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-SD-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
+; CHECK-SD-CVT-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_v4f16_v4i8:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: movi d1, #0xff00ff00ff00ff
+; CHECK-SD-FP16-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-SD-FP16-NEXT: umin v0.4h, v0.4h, v1.4h
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_v4f16_v4i8:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-GI-CVT-NEXT: movi v1.2d, #0x0000ff000000ff
+; CHECK-GI-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-GI-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
+; CHECK-GI-CVT-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_v4f16_v4i8:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: movi d1, #0xff00ff00ff00ff
+; CHECK-GI-FP16-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: umin v0.4h, v0.4h, v1.4h
+; CHECK-GI-FP16-NEXT: ret
%x = call <4 x i8> @llvm.fptoui.sat.v4f16.v4i8(<4 x half> %f)
ret <4 x i8> %x
}

define <4 x i13> @test_unsigned_v4f16_v4i13(<4 x half> %f) {
-; CHECK-CVT-LABEL: test_unsigned_v4f16_v4i13:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: movi v1.4s, #31, msl #8
-; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_v4f16_v4i13:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzu v0.4h, v0.4h
-; CHECK-FP16-NEXT: mvni v1.4h, #224, lsl #8
-; CHECK-FP16-NEXT: umin v0.4h, v0.4h, v1.4h
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_v4f16_v4i13:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-SD-CVT-NEXT: movi v1.4s, #31, msl #8
+; CHECK-SD-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-SD-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
+; CHECK-SD-CVT-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_v4f16_v4i13:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-SD-FP16-NEXT: mvni v1.4h, #224, lsl #8
+; CHECK-SD-FP16-NEXT: umin v0.4h, v0.4h, v1.4h
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_v4f16_v4i13:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-GI-CVT-NEXT: movi v1.4s, #31, msl #8
+; CHECK-GI-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-GI-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
+; CHECK-GI-CVT-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_v4f16_v4i13:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: mvni v1.4h, #224, lsl #8
+; CHECK-GI-FP16-NEXT: umin v0.4h, v0.4h, v1.4h
+; CHECK-GI-FP16-NEXT: ret
%x = call <4 x i13> @llvm.fptoui.sat.v4f16.v4i13(<4 x half> %f)
ret <4 x i13> %x
}

define <4 x i16> @test_unsigned_v4f16_v4i16(<4 x half> %f) {
-; CHECK-CVT-LABEL: test_unsigned_v4f16_v4i16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-CVT-NEXT: uqxtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_v4f16_v4i16:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzu v0.4h, v0.4h
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_v4f16_v4i16:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-SD-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-SD-CVT-NEXT: uqxtn v0.4h, v0.4s
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_v4f16_v4i16:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_v4f16_v4i16:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-GI-CVT-NEXT: movi v1.2d, #0x00ffff0000ffff
+; CHECK-GI-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-GI-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
+; CHECK-GI-CVT-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_v4f16_v4i16:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: ret
%x = call <4 x i16> @llvm.fptoui.sat.v4f16.v4i16(<4 x half> %f)
ret <4 x i16> %x
}
@@ -1436,257 +2567,400 @@ define <4 x i32> @test_unsigned_v4f16_v4i32_duplicate(<4 x half> %f) {
}

define <4 x i50> @test_unsigned_v4f16_v4i50(<4 x half> %f) {
-; CHECK-CVT-LABEL: test_unsigned_v4f16_v4i50:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-CVT-NEXT: mov h1, v0.h[1]
-; CHECK-CVT-NEXT: mov h2, v0.h[2]
-; CHECK-CVT-NEXT: mov x8, #1125899906842623 // =0x3ffffffffffff
-; CHECK-CVT-NEXT: mov h3, v0.h[3]
-; CHECK-CVT-NEXT: fcvt s0, h0
-; CHECK-CVT-NEXT: fcvt s1, h1
-; CHECK-CVT-NEXT: fcvt s2, h2
-; CHECK-CVT-NEXT: fcvt s3, h3
-; CHECK-CVT-NEXT: fcvtzu x9, s0
-; CHECK-CVT-NEXT: fcvtzu x10, s1
-; CHECK-CVT-NEXT: fcvtzu x11, s2
-; CHECK-CVT-NEXT: fcvtzu x12, s3
-; CHECK-CVT-NEXT: cmp x9, x8
-; CHECK-CVT-NEXT: csel x0, x9, x8, lo
-; CHECK-CVT-NEXT: cmp x10, x8
-; CHECK-CVT-NEXT: csel x1, x10, x8, lo
-; CHECK-CVT-NEXT: cmp x11, x8
-; CHECK-CVT-NEXT: csel x2, x11, x8, lo
-; CHECK-CVT-NEXT: cmp x12, x8
-; CHECK-CVT-NEXT: csel x3, x12, x8, lo
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_v4f16_v4i50:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-FP16-NEXT: mov h1, v0.h[1]
-; CHECK-FP16-NEXT: mov h2, v0.h[2]
-; CHECK-FP16-NEXT: mov x8, #1125899906842623 // =0x3ffffffffffff
-; CHECK-FP16-NEXT: mov h3, v0.h[3]
-; CHECK-FP16-NEXT: fcvtzu x9, h0
-; CHECK-FP16-NEXT: fcvtzu x10, h1
-; CHECK-FP16-NEXT: fcvtzu x11, h2
-; CHECK-FP16-NEXT: fcvtzu x12, h3
-; CHECK-FP16-NEXT: cmp x9, x8
-; CHECK-FP16-NEXT: csel x0, x9, x8, lo
-; CHECK-FP16-NEXT: cmp x10, x8
-; CHECK-FP16-NEXT: csel x1, x10, x8, lo
-; CHECK-FP16-NEXT: cmp x11, x8
-; CHECK-FP16-NEXT: csel x2, x11, x8, lo
-; CHECK-FP16-NEXT: cmp x12, x8
-; CHECK-FP16-NEXT: csel x3, x12, x8, lo
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_v4f16_v4i50:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-CVT-NEXT: mov h1, v0.h[1]
+; CHECK-SD-CVT-NEXT: mov h2, v0.h[2]
+; CHECK-SD-CVT-NEXT: mov x8, #1125899906842623 // =0x3ffffffffffff
+; CHECK-SD-CVT-NEXT: mov h3, v0.h[3]
+; CHECK-SD-CVT-NEXT: fcvt s0, h0
+; CHECK-SD-CVT-NEXT: fcvt s1, h1
+; CHECK-SD-CVT-NEXT: fcvt s2, h2
+; CHECK-SD-CVT-NEXT: fcvt s3, h3
+; CHECK-SD-CVT-NEXT: fcvtzu x9, s0
+; CHECK-SD-CVT-NEXT: fcvtzu x10, s1
+; CHECK-SD-CVT-NEXT: fcvtzu x11, s2
+; CHECK-SD-CVT-NEXT: fcvtzu x12, s3
+; CHECK-SD-CVT-NEXT: cmp x9, x8
+; CHECK-SD-CVT-NEXT: csel x0, x9, x8, lo
+; CHECK-SD-CVT-NEXT: cmp x10, x8
+; CHECK-SD-CVT-NEXT: csel x1, x10, x8, lo
+; CHECK-SD-CVT-NEXT: cmp x11, x8
+; CHECK-SD-CVT-NEXT: csel x2, x11, x8, lo
+; CHECK-SD-CVT-NEXT: cmp x12, x8
+; CHECK-SD-CVT-NEXT: csel x3, x12, x8, lo
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_v4f16_v4i50:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-SD-FP16-NEXT: mov h2, v0.h[2]
+; CHECK-SD-FP16-NEXT: mov x8, #1125899906842623 // =0x3ffffffffffff
+; CHECK-SD-FP16-NEXT: mov h3, v0.h[3]
+; CHECK-SD-FP16-NEXT: fcvtzu x9, h0
+; CHECK-SD-FP16-NEXT: fcvtzu x10, h1
+; CHECK-SD-FP16-NEXT: fcvtzu x11, h2
+; CHECK-SD-FP16-NEXT: fcvtzu x12, h3
+; CHECK-SD-FP16-NEXT: cmp x9, x8
+; CHECK-SD-FP16-NEXT: csel x0, x9, x8, lo
+; CHECK-SD-FP16-NEXT: cmp x10, x8
+; CHECK-SD-FP16-NEXT: csel x1, x10, x8, lo
+; CHECK-SD-FP16-NEXT: cmp x11, x8
+; CHECK-SD-FP16-NEXT: csel x2, x11, x8, lo
+; CHECK-SD-FP16-NEXT: cmp x12, x8
+; CHECK-SD-FP16-NEXT: csel x3, x12, x8, lo
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_v4f16_v4i50:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-CVT-NEXT: mov h1, v0.h[1]
+; CHECK-GI-CVT-NEXT: mov h2, v0.h[2]
+; CHECK-GI-CVT-NEXT: mov x8, #1125899906842623 // =0x3ffffffffffff
+; CHECK-GI-CVT-NEXT: mov h3, v0.h[3]
+; CHECK-GI-CVT-NEXT: fcvt s0, h0
+; CHECK-GI-CVT-NEXT: fcvt s1, h1
+; CHECK-GI-CVT-NEXT: fcvt s2, h2
+; CHECK-GI-CVT-NEXT: fcvt s3, h3
+; CHECK-GI-CVT-NEXT: fcvtzu x9, s0
+; CHECK-GI-CVT-NEXT: fcvtzu x10, s1
+; CHECK-GI-CVT-NEXT: fcvtzu x11, s2
+; CHECK-GI-CVT-NEXT: fcvtzu x12, s3
+; CHECK-GI-CVT-NEXT: cmp x9, x8
+; CHECK-GI-CVT-NEXT: csel x0, x9, x8, lo
+; CHECK-GI-CVT-NEXT: cmp x10, x8
+; CHECK-GI-CVT-NEXT: csel x1, x10, x8, lo
+; CHECK-GI-CVT-NEXT: cmp x11, x8
+; CHECK-GI-CVT-NEXT: csel x2, x11, x8, lo
+; CHECK-GI-CVT-NEXT: cmp x12, x8
+; CHECK-GI-CVT-NEXT: csel x3, x12, x8, lo
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_v4f16_v4i50:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov h2, v0.h[2]
+; CHECK-GI-FP16-NEXT: mov x8, #1125899906842623 // =0x3ffffffffffff
+; CHECK-GI-FP16-NEXT: mov h3, v0.h[3]
+; CHECK-GI-FP16-NEXT: fcvtzu x9, h0
+; CHECK-GI-FP16-NEXT: fcvtzu x10, h1
+; CHECK-GI-FP16-NEXT: fcvtzu x11, h2
+; CHECK-GI-FP16-NEXT: fcvtzu x12, h3
+; CHECK-GI-FP16-NEXT: cmp x9, x8
+; CHECK-GI-FP16-NEXT: csel x0, x9, x8, lo
+; CHECK-GI-FP16-NEXT: cmp x10, x8
+; CHECK-GI-FP16-NEXT: csel x1, x10, x8, lo
+; CHECK-GI-FP16-NEXT: cmp x11, x8
+; CHECK-GI-FP16-NEXT: csel x2, x11, x8, lo
+; CHECK-GI-FP16-NEXT: cmp x12, x8
+; CHECK-GI-FP16-NEXT: csel x3, x12, x8, lo
+; CHECK-GI-FP16-NEXT: ret
%x = call <4 x i50> @llvm.fptoui.sat.v4f16.v4i50(<4 x half> %f)
ret <4 x i50> %x
}

define <4 x i64> @test_unsigned_v4f16_v4i64(<4 x half> %f) {
-; CHECK-CVT-LABEL: test_unsigned_v4f16_v4i64:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-CVT-NEXT: mov h1, v0.h[2]
-; CHECK-CVT-NEXT: mov h2, v0.h[1]
-; CHECK-CVT-NEXT: mov h3, v0.h[3]
-; CHECK-CVT-NEXT: fcvt s0, h0
-; CHECK-CVT-NEXT: fcvt s1, h1
-; CHECK-CVT-NEXT: fcvt s2, h2
-; CHECK-CVT-NEXT: fcvt s3, h3
-; CHECK-CVT-NEXT: fcvtzu x8, s0
-; CHECK-CVT-NEXT: fcvtzu x9, s1
-; CHECK-CVT-NEXT: fcvtzu x10, s2
-; CHECK-CVT-NEXT: fcvtzu x11, s3
-; CHECK-CVT-NEXT: fmov d0, x8
-; CHECK-CVT-NEXT: fmov d1, x9
-; CHECK-CVT-NEXT: mov v0.d[1], x10
-; CHECK-CVT-NEXT: mov v1.d[1], x11
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_v4f16_v4i64:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-FP16-NEXT: mov h1, v0.h[2]
-; CHECK-FP16-NEXT: mov h2, v0.h[1]
-; CHECK-FP16-NEXT: mov h3, v0.h[3]
-; CHECK-FP16-NEXT: fcvtzu x8, h0
-; CHECK-FP16-NEXT: fcvtzu x9, h1
-; CHECK-FP16-NEXT: fcvtzu x10, h2
-; CHECK-FP16-NEXT: fcvtzu x11, h3
-; CHECK-FP16-NEXT: fmov d0, x8
-; CHECK-FP16-NEXT: fmov d1, x9
-; CHECK-FP16-NEXT: mov v0.d[1], x10
-; CHECK-FP16-NEXT: mov v1.d[1], x11
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_v4f16_v4i64:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-CVT-NEXT: mov h1, v0.h[2]
+; CHECK-SD-CVT-NEXT: mov h2, v0.h[1]
+; CHECK-SD-CVT-NEXT: mov h3, v0.h[3]
+; CHECK-SD-CVT-NEXT: fcvt s0, h0
+; CHECK-SD-CVT-NEXT: fcvt s1, h1
+; CHECK-SD-CVT-NEXT: fcvt s2, h2
+; CHECK-SD-CVT-NEXT: fcvt s3, h3
+; CHECK-SD-CVT-NEXT: fcvtzu x8, s0
+; CHECK-SD-CVT-NEXT: fcvtzu x9, s1
+; CHECK-SD-CVT-NEXT: fcvtzu x10, s2
+; CHECK-SD-CVT-NEXT: fcvtzu x11, s3
+; CHECK-SD-CVT-NEXT: fmov d0, x8
+; CHECK-SD-CVT-NEXT: fmov d1, x9
+; CHECK-SD-CVT-NEXT: mov v0.d[1], x10
+; CHECK-SD-CVT-NEXT: mov v1.d[1], x11
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_v4f16_v4i64:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-FP16-NEXT: mov h1, v0.h[2]
+; CHECK-SD-FP16-NEXT: mov h2, v0.h[1]
+; CHECK-SD-FP16-NEXT: mov h3, v0.h[3]
+; CHECK-SD-FP16-NEXT: fcvtzu x8, h0
+; CHECK-SD-FP16-NEXT: fcvtzu x9, h1
+; CHECK-SD-FP16-NEXT: fcvtzu x10, h2
+; CHECK-SD-FP16-NEXT: fcvtzu x11, h3
+; CHECK-SD-FP16-NEXT: fmov d0, x8
+; CHECK-SD-FP16-NEXT: fmov d1, x9
+; CHECK-SD-FP16-NEXT: mov v0.d[1], x10
+; CHECK-SD-FP16-NEXT: mov v1.d[1], x11
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_v4f16_v4i64:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-GI-CVT-NEXT: fcvtl v1.2d, v0.2s
+; CHECK-GI-CVT-NEXT: fcvtl2 v2.2d, v0.4s
+; CHECK-GI-CVT-NEXT: fcvtzu v0.2d, v1.2d
+; CHECK-GI-CVT-NEXT: fcvtzu v1.2d, v2.2d
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_v4f16_v4i64:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov s1, v0.s[1]
+; CHECK-GI-FP16-NEXT: mov h2, v0.h[1]
+; CHECK-GI-FP16-NEXT: fcvt d0, h0
+; CHECK-GI-FP16-NEXT: mov h3, v1.h[1]
+; CHECK-GI-FP16-NEXT: fcvt d2, h2
+; CHECK-GI-FP16-NEXT: fcvt d1, h1
+; CHECK-GI-FP16-NEXT: fcvt d3, h3
+; CHECK-GI-FP16-NEXT: mov v0.d[1], v2.d[0]
+; CHECK-GI-FP16-NEXT: mov v1.d[1], v3.d[0]
+; CHECK-GI-FP16-NEXT: fcvtzu v0.2d, v0.2d
+; CHECK-GI-FP16-NEXT: fcvtzu v1.2d, v1.2d
+; CHECK-GI-FP16-NEXT: ret
%x = call <4 x i64> @llvm.fptoui.sat.v4f16.v4i64(<4 x half> %f)
ret <4 x i64> %x
}

define <4 x i100> @test_unsigned_v4f16_v4i100(<4 x half> %f) {
-; CHECK-LABEL: test_unsigned_v4f16_v4i100:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #96
-; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: stp x30, x25, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT: stp x24, x23, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: stp x22, x21, [sp, #64] // 16-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 96
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w21, -24
-; CHECK-NEXT: .cfi_offset w22, -32
-; CHECK-NEXT: .cfi_offset w23, -40
-; CHECK-NEXT: .cfi_offset w24, -48
-; CHECK-NEXT: .cfi_offset w25, -56
-; CHECK-NEXT: .cfi_offset w30, -64
-; CHECK-NEXT: .cfi_offset b8, -72
-; CHECK-NEXT: .cfi_offset b9, -80
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: mov h1, v0.h[1]
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: fcvt s8, h1
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: mov w8, #1904214015 // =0x717fffff
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: fmov s9, w8
-; CHECK-NEXT: mov x25, #68719476735 // =0xfffffffff
-; CHECK-NEXT: mov h0, v0.h[2]
-; CHECK-NEXT: csel x9, xzr, x0, lt
-; CHECK-NEXT: csel x8, xzr, x1, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: csel x19, x25, x8, gt
-; CHECK-NEXT: csinv x20, x9, xzr, le
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: csel x8, xzr, x0, lt
-; CHECK-NEXT: csel x9, xzr, x1, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: csel x21, x25, x9, gt
-; CHECK-NEXT: csinv x22, x8, xzr, le
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: mov h0, v0.h[3]
-; CHECK-NEXT: csel x8, xzr, x0, lt
-; CHECK-NEXT: csel x9, xzr, x1, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: csel x23, x25, x9, gt
-; CHECK-NEXT: csinv x24, x8, xzr, le
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: mov x2, x20
-; CHECK-NEXT: mov x3, x19
-; CHECK-NEXT: mov x4, x22
-; CHECK-NEXT: mov x5, x21
-; CHECK-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload
-; CHECK-NEXT: csel x8, xzr, x0, lt
-; CHECK-NEXT: csel x9, xzr, x1, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: mov x0, x24
-; CHECK-NEXT: mov x1, x23
-; CHECK-NEXT: ldp x22, x21, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT: csel x7, x25, x9, gt
-; CHECK-NEXT: ldp x24, x23, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: ldp x30, x25, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: csinv x6, x8, xzr, le
-; CHECK-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: add sp, sp, #96
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_unsigned_v4f16_v4i100:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #96
+; CHECK-SD-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x30, x25, [sp, #32] // 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x24, x23, [sp, #48] // 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x22, x21, [sp, #64] // 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 96
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w20, -16
+; CHECK-SD-NEXT: .cfi_offset w21, -24
+; CHECK-SD-NEXT: .cfi_offset w22, -32
+; CHECK-SD-NEXT: .cfi_offset w23, -40
+; CHECK-SD-NEXT: .cfi_offset w24, -48
+; CHECK-SD-NEXT: .cfi_offset w25, -56
+; CHECK-SD-NEXT: .cfi_offset w30, -64
+; CHECK-SD-NEXT: .cfi_offset b8, -72
+; CHECK-SD-NEXT: .cfi_offset b9, -80
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: mov h1, v0.h[1]
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: fcvt s8, h1
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov w8, #1904214015 // =0x717fffff
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: fmov s9, w8
+; CHECK-SD-NEXT: mov x25, #68719476735 // =0xfffffffff
+; CHECK-SD-NEXT: mov h0, v0.h[2]
+; CHECK-SD-NEXT: csel x9, xzr, x0, lt
+; CHECK-SD-NEXT: csel x8, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: csel x19, x25, x8, gt
+; CHECK-SD-NEXT: csinv x20, x9, xzr, le
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: csel x8, xzr, x0, lt
+; CHECK-SD-NEXT: csel x9, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: csel x21, x25, x9, gt
+; CHECK-SD-NEXT: csinv x22, x8, xzr, le
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: mov h0, v0.h[3]
+; CHECK-SD-NEXT: csel x8, xzr, x0, lt
+; CHECK-SD-NEXT: csel x9, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: csel x23, x25, x9, gt
+; CHECK-SD-NEXT: csinv x24, x8, xzr, le
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: mov x2, x20
+; CHECK-SD-NEXT: mov x3, x19
+; CHECK-SD-NEXT: mov x4, x22
+; CHECK-SD-NEXT: mov x5, x21
+; CHECK-SD-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload
+; CHECK-SD-NEXT: csel x8, xzr, x0, lt
+; CHECK-SD-NEXT: csel x9, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: mov x0, x24
+; CHECK-SD-NEXT: mov x1, x23
+; CHECK-SD-NEXT: ldp x22, x21, [sp, #64] // 16-byte Folded Reload
+; CHECK-SD-NEXT: csel x7, x25, x9, gt
+; CHECK-SD-NEXT: ldp x24, x23, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-NEXT: ldp x30, x25, [sp, #32] // 16-byte Folded Reload
+; CHECK-SD-NEXT: csinv x6, x8, xzr, le
+; CHECK-SD-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: add sp, sp, #96
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_v4f16_v4i100:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-CVT-NEXT: mov h1, v0.h[1]
+; CHECK-GI-CVT-NEXT: mov h2, v0.h[2]
+; CHECK-GI-CVT-NEXT: mov x1, xzr
+; CHECK-GI-CVT-NEXT: mov h3, v0.h[3]
+; CHECK-GI-CVT-NEXT: fcvt s0, h0
+; CHECK-GI-CVT-NEXT: mov x3, xzr
+; CHECK-GI-CVT-NEXT: mov x5, xzr
+; CHECK-GI-CVT-NEXT: mov x7, xzr
+; CHECK-GI-CVT-NEXT: fcvt s1, h1
+; CHECK-GI-CVT-NEXT: fcvt s2, h2
+; CHECK-GI-CVT-NEXT: fcvt s3, h3
+; CHECK-GI-CVT-NEXT: fcvtzu x0, s0
+; CHECK-GI-CVT-NEXT: fcvtzu x2, s1
+; CHECK-GI-CVT-NEXT: fcvtzu x4, s2
+; CHECK-GI-CVT-NEXT: fcvtzu x6, s3
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_v4f16_v4i100:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov h2, v0.h[2]
+; CHECK-GI-FP16-NEXT: mov x1, xzr
+; CHECK-GI-FP16-NEXT: mov h3, v0.h[3]
+; CHECK-GI-FP16-NEXT: fcvtzu x0, h0
+; CHECK-GI-FP16-NEXT: mov x3, xzr
+; CHECK-GI-FP16-NEXT: mov x5, xzr
+; CHECK-GI-FP16-NEXT: mov x7, xzr
+; CHECK-GI-FP16-NEXT: fcvtzu x2, h1
+; CHECK-GI-FP16-NEXT: fcvtzu x4, h2
+; CHECK-GI-FP16-NEXT: fcvtzu x6, h3
+; CHECK-GI-FP16-NEXT: ret
%x = call <4 x i100> @llvm.fptoui.sat.v4f16.v4i100(<4 x half> %f)
ret <4 x i100> %x
}

define <4 x i128> @test_unsigned_v4f16_v4i128(<4 x half> %f) {
-; CHECK-LABEL: test_unsigned_v4f16_v4i128:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #96
-; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
-; CHECK-NEXT: stp x24, x23, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: stp x22, x21, [sp, #64] // 16-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 96
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w21, -24
-; CHECK-NEXT: .cfi_offset w22, -32
-; CHECK-NEXT: .cfi_offset w23, -40
-; CHECK-NEXT: .cfi_offset w24, -48
-; CHECK-NEXT: .cfi_offset w30, -64
-; CHECK-NEXT: .cfi_offset b8, -72
-; CHECK-NEXT: .cfi_offset b9, -80
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: mov w8, #2139095039 // =0x7f7fffff
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: fmov s9, w8
-; CHECK-NEXT: mov h0, v0.h[1]
-; CHECK-NEXT: csel x9, xzr, x1, lt
-; CHECK-NEXT: csel x8, xzr, x0, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: csinv x19, x8, xzr, le
-; CHECK-NEXT: csinv x20, x9, xzr, le
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: mov h0, v0.h[2]
-; CHECK-NEXT: csel x8, xzr, x1, lt
-; CHECK-NEXT: csel x9, xzr, x0, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: csinv x21, x9, xzr, le
-; CHECK-NEXT: csinv x22, x8, xzr, le
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: mov h0, v0.h[3]
-; CHECK-NEXT: csel x8, xzr, x1, lt
-; CHECK-NEXT: csel x9, xzr, x0, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: csinv x23, x9, xzr, le
-; CHECK-NEXT: csinv x24, x8, xzr, le
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: mov x2, x21
-; CHECK-NEXT: mov x3, x22
-; CHECK-NEXT: mov x4, x23
-; CHECK-NEXT: mov x5, x24
-; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-NEXT: ldp x22, x21, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT: csel x8, xzr, x1, lt
-; CHECK-NEXT: csel x9, xzr, x0, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: mov x0, x19
-; CHECK-NEXT: mov x1, x20
-; CHECK-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload
-; CHECK-NEXT: ldp x24, x23, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: csinv x6, x9, xzr, le
-; CHECK-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: csinv x7, x8, xzr, le
-; CHECK-NEXT: add sp, sp, #96
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_unsigned_v4f16_v4i128:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #96
+; CHECK-SD-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-SD-NEXT: stp x24, x23, [sp, #48] // 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x22, x21, [sp, #64] // 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 96
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w20, -16
+; CHECK-SD-NEXT: .cfi_offset w21, -24
+; CHECK-SD-NEXT: .cfi_offset w22, -32
+; CHECK-SD-NEXT: .cfi_offset w23, -40
+; CHECK-SD-NEXT: .cfi_offset w24, -48
+; CHECK-SD-NEXT: .cfi_offset w30, -64
+; CHECK-SD-NEXT: .cfi_offset b8, -72
+; CHECK-SD-NEXT: .cfi_offset b9, -80
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov w8, #2139095039 // =0x7f7fffff
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: fmov s9, w8
+; CHECK-SD-NEXT: mov h0, v0.h[1]
+; CHECK-SD-NEXT: csel x9, xzr, x1, lt
+; CHECK-SD-NEXT: csel x8, xzr, x0, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: csinv x19, x8, xzr, le
+; CHECK-SD-NEXT: csinv x20, x9, xzr, le
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: mov h0, v0.h[2]
+; CHECK-SD-NEXT: csel x8, xzr, x1, lt
+; CHECK-SD-NEXT: csel x9, xzr, x0, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: csinv x21, x9, xzr, le
+; CHECK-SD-NEXT: csinv x22, x8, xzr, le
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: mov h0, v0.h[3]
+; CHECK-SD-NEXT: csel x8, xzr, x1, lt
+; CHECK-SD-NEXT: csel x9, xzr, x0, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: csinv x23, x9, xzr, le
+; CHECK-SD-NEXT: csinv x24, x8, xzr, le
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: mov x2, x21
+; CHECK-SD-NEXT: mov x3, x22
+; CHECK-SD-NEXT: mov x4, x23
+; CHECK-SD-NEXT: mov x5, x24
+; CHECK-SD-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-SD-NEXT: ldp x22, x21, [sp, #64] // 16-byte Folded Reload
+; CHECK-SD-NEXT: csel x8, xzr, x1, lt
+; CHECK-SD-NEXT: csel x9, xzr, x0, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: mov x0, x19
+; CHECK-SD-NEXT: mov x1, x20
+; CHECK-SD-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload
+; CHECK-SD-NEXT: ldp x24, x23, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-NEXT: csinv x6, x9, xzr, le
+; CHECK-SD-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: csinv x7, x8, xzr, le
+; CHECK-SD-NEXT: add sp, sp, #96
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_v4f16_v4i128:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-CVT-NEXT: mov h1, v0.h[1]
+; CHECK-GI-CVT-NEXT: mov h2, v0.h[2]
+; CHECK-GI-CVT-NEXT: mov x1, xzr
+; CHECK-GI-CVT-NEXT: mov h3, v0.h[3]
+; CHECK-GI-CVT-NEXT: fcvt s0, h0
+; CHECK-GI-CVT-NEXT: mov x3, xzr
+; CHECK-GI-CVT-NEXT: mov x5, xzr
+; CHECK-GI-CVT-NEXT: mov x7, xzr
+; CHECK-GI-CVT-NEXT: fcvt s1, h1
+; CHECK-GI-CVT-NEXT: fcvt s2, h2
+; CHECK-GI-CVT-NEXT: fcvt s3, h3
+; CHECK-GI-CVT-NEXT: fcvtzu x0, s0
+; CHECK-GI-CVT-NEXT: fcvtzu x2, s1
+; CHECK-GI-CVT-NEXT: fcvtzu x4, s2
+; CHECK-GI-CVT-NEXT: fcvtzu x6, s3
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_v4f16_v4i128:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov h2, v0.h[2]
+; CHECK-GI-FP16-NEXT: mov x1, xzr
+; CHECK-GI-FP16-NEXT: mov h3, v0.h[3]
+; CHECK-GI-FP16-NEXT: fcvtzu x0, h0
+; CHECK-GI-FP16-NEXT: mov x3, xzr
+; CHECK-GI-FP16-NEXT: mov x5, xzr
+; CHECK-GI-FP16-NEXT: mov x7, xzr
+; CHECK-GI-FP16-NEXT: fcvtzu x2, h1
+; CHECK-GI-FP16-NEXT: fcvtzu x4, h2
+; CHECK-GI-FP16-NEXT: fcvtzu x6, h3
+; CHECK-GI-FP16-NEXT: ret
%x = call <4 x i128> @llvm.fptoui.sat.v4f16.v4i128(<4 x half> %f)
ret <4 x i128> %x
}
@@ -1706,91 +2980,169 @@ declare <8 x i100> @llvm.fptoui.sat.v8f16.v8i100(<8 x half>)
declare <8 x i128> @llvm.fptoui.sat.v8f16.v8i128(<8 x half>)

define <8 x i1> @test_unsigned_v8f16_v8i1(<8 x half> %f) {
-; CHECK-CVT-LABEL: test_unsigned_v8f16_v8i1:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: movi v1.4s, #1
-; CHECK-CVT-NEXT: fcvtzu v2.4s, v2.4s
-; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-CVT-NEXT: umin v2.4s, v2.4s, v1.4s
-; CHECK-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_v8f16_v8i1:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: movi v1.8h, #1
-; CHECK-FP16-NEXT: fcvtzu v0.8h, v0.8h
-; CHECK-FP16-NEXT: umin v0.8h, v0.8h, v1.8h
-; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_v8f16_v8i1:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvtl2 v2.4s, v0.8h
+; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-SD-CVT-NEXT: movi v1.4s, #1
+; CHECK-SD-CVT-NEXT: fcvtzu v2.4s, v2.4s
+; CHECK-SD-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-SD-CVT-NEXT: umin v2.4s, v2.4s, v1.4s
+; CHECK-SD-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
+; CHECK-SD-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-CVT-NEXT: xtn v0.8b, v0.8h
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_v8f16_v8i1:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: movi v1.8h, #1
+; CHECK-SD-FP16-NEXT: fcvtzu v0.8h, v0.8h
+; CHECK-SD-FP16-NEXT: umin v0.8h, v0.8h, v1.8h
+; CHECK-SD-FP16-NEXT: xtn v0.8b, v0.8h
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_v8f16_v8i1:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-GI-CVT-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-GI-CVT-NEXT: movi v1.4s, #1
+; CHECK-GI-CVT-NEXT: fcvtzu v2.4s, v2.4s
+; CHECK-GI-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-GI-CVT-NEXT: umin v2.4s, v2.4s, v1.4s
+; CHECK-GI-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
+; CHECK-GI-CVT-NEXT: uzp1 v0.8h, v2.8h, v0.8h
+; CHECK-GI-CVT-NEXT: xtn v0.8b, v0.8h
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_v8f16_v8i1:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: movi v1.8h, #1
+; CHECK-GI-FP16-NEXT: fcvtzu v0.8h, v0.8h
+; CHECK-GI-FP16-NEXT: umin v0.8h, v0.8h, v1.8h
+; CHECK-GI-FP16-NEXT: xtn v0.8b, v0.8h
+; CHECK-GI-FP16-NEXT: ret
%x = call <8 x i1> @llvm.fptoui.sat.v8f16.v8i1(<8 x half> %f)
ret <8 x i1> %x
}

define <8 x i8> @test_unsigned_v8f16_v8i8(<8 x half> %f) {
-; CHECK-CVT-LABEL: test_unsigned_v8f16_v8i8:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: movi v1.2d, #0x0000ff000000ff
-; CHECK-CVT-NEXT: fcvtzu v2.4s, v2.4s
-; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-CVT-NEXT: umin v2.4s, v2.4s, v1.4s
-; CHECK-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_v8f16_v8i8:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzu v0.8h, v0.8h
-; CHECK-FP16-NEXT: uqxtn v0.8b, v0.8h
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_v8f16_v8i8:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvtl2 v2.4s, v0.8h
+; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-SD-CVT-NEXT: movi v1.2d, #0x0000ff000000ff
+; CHECK-SD-CVT-NEXT: fcvtzu v2.4s, v2.4s
+; CHECK-SD-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-SD-CVT-NEXT: umin v2.4s, v2.4s, v1.4s
+; CHECK-SD-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
+; CHECK-SD-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-CVT-NEXT: xtn v0.8b, v0.8h
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_v8f16_v8i8:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: fcvtzu v0.8h, v0.8h
+; CHECK-SD-FP16-NEXT: uqxtn v0.8b, v0.8h
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_v8f16_v8i8:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-GI-CVT-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-GI-CVT-NEXT: movi v1.2d, #0x0000ff000000ff
+; CHECK-GI-CVT-NEXT: fcvtzu v2.4s, v2.4s
+; CHECK-GI-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-GI-CVT-NEXT: umin v2.4s, v2.4s, v1.4s
+; CHECK-GI-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
+; CHECK-GI-CVT-NEXT: uzp1 v0.8h, v2.8h, v0.8h
+; CHECK-GI-CVT-NEXT: xtn v0.8b, v0.8h
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_v8f16_v8i8:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: movi v1.2d, #0xff00ff00ff00ff
+; CHECK-GI-FP16-NEXT: fcvtzu v0.8h, v0.8h
+; CHECK-GI-FP16-NEXT: umin v0.8h, v0.8h, v1.8h
+; CHECK-GI-FP16-NEXT: xtn v0.8b, v0.8h
+; CHECK-GI-FP16-NEXT: ret
%x = call <8 x i8> @llvm.fptoui.sat.v8f16.v8i8(<8 x half> %f)
ret <8 x i8> %x
}

define <8 x i13> @test_unsigned_v8f16_v8i13(<8 x half> %f) {
-; CHECK-CVT-LABEL: test_unsigned_v8f16_v8i13:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: movi v1.4s, #31, msl #8
-; CHECK-CVT-NEXT: fcvtzu v2.4s, v2.4s
-; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-CVT-NEXT: umin v2.4s, v2.4s, v1.4s
-; CHECK-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_v8f16_v8i13:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzu v0.8h, v0.8h
-; CHECK-FP16-NEXT: mvni v1.8h, #224, lsl #8
-; CHECK-FP16-NEXT: umin v0.8h, v0.8h, v1.8h
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_v8f16_v8i13:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvtl2 v2.4s, v0.8h
+; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-SD-CVT-NEXT: movi v1.4s, #31, msl #8
+; CHECK-SD-CVT-NEXT: fcvtzu v2.4s, v2.4s
+; CHECK-SD-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-SD-CVT-NEXT: umin v2.4s, v2.4s, v1.4s
+; CHECK-SD-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
+; CHECK-SD-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_v8f16_v8i13:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: fcvtzu v0.8h, v0.8h
+; CHECK-SD-FP16-NEXT: mvni v1.8h, #224, lsl #8
+; CHECK-SD-FP16-NEXT: umin v0.8h, v0.8h, v1.8h
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_v8f16_v8i13:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-GI-CVT-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-GI-CVT-NEXT: movi v1.4s, #31, msl #8
+; CHECK-GI-CVT-NEXT: fcvtzu v2.4s, v2.4s
+; CHECK-GI-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-GI-CVT-NEXT: umin v2.4s, v2.4s, v1.4s
+; CHECK-GI-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
+; CHECK-GI-CVT-NEXT: uzp1 v0.8h, v2.8h, v0.8h
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_v8f16_v8i13:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzu v0.8h, v0.8h
+; CHECK-GI-FP16-NEXT: mvni v1.8h, #224, lsl #8
+; CHECK-GI-FP16-NEXT: umin v0.8h, v0.8h, v1.8h
+; CHECK-GI-FP16-NEXT: ret
%x = call <8 x i13> @llvm.fptoui.sat.v8f16.v8i13(<8 x half> %f)
ret <8 x i13> %x
}

define <8 x i16> @test_unsigned_v8f16_v8i16(<8 x half> %f) {
-; CHECK-CVT-LABEL: test_unsigned_v8f16_v8i16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v1.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtzu v1.4s, v1.4s
-; CHECK-CVT-NEXT: uqxtn v0.4h, v1.4s
-; CHECK-CVT-NEXT: fcvtzu v1.4s, v2.4s
-; CHECK-CVT-NEXT: uqxtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_v8f16_v8i16:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzu v0.8h, v0.8h
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_v8f16_v8i16:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: fcvtl v1.4s, v0.4h
+; CHECK-SD-CVT-NEXT: fcvtl2 v2.4s, v0.8h
+; CHECK-SD-CVT-NEXT: fcvtzu v1.4s, v1.4s
+; CHECK-SD-CVT-NEXT: uqxtn v0.4h, v1.4s
+; CHECK-SD-CVT-NEXT: fcvtzu v1.4s, v2.4s
+; CHECK-SD-CVT-NEXT: uqxtn2 v0.8h, v1.4s
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_v8f16_v8i16:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: fcvtzu v0.8h, v0.8h
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_v8f16_v8i16:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-GI-CVT-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-GI-CVT-NEXT: movi v1.2d, #0x00ffff0000ffff
+; CHECK-GI-CVT-NEXT: fcvtzu v2.4s, v2.4s
+; CHECK-GI-CVT-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-GI-CVT-NEXT: umin v2.4s, v2.4s, v1.4s
+; CHECK-GI-CVT-NEXT: umin v0.4s, v0.4s, v1.4s
+; CHECK-GI-CVT-NEXT: uzp1 v0.8h, v2.8h, v0.8h
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_v8f16_v8i16:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: fcvtzu v0.8h, v0.8h
+; CHECK-GI-FP16-NEXT: ret
%x = call <8 x i16> @llvm.fptoui.sat.v8f16.v8i16(<8 x half> %f)
ret <8 x i16> %x
}
@@ -1819,327 +3171,562 @@ define <8 x i19> @test_unsigned_v8f16_v8i19(<8 x half> %f) {
}

define <8 x i32> @test_unsigned_v8f16_v8i32_duplicate(<8 x half> %f) {
-; CHECK-LABEL: test_unsigned_v8f16_v8i32_duplicate:
-; CHECK: // %bb.0:
-; CHECK-NEXT: fcvtl2 v1.4s, v0.8h
-; CHECK-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-NEXT: fcvtzu v1.4s, v1.4s
-; CHECK-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_unsigned_v8f16_v8i32_duplicate:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: fcvtl2 v1.4s, v0.8h
+; CHECK-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-SD-NEXT: fcvtzu v1.4s, v1.4s
+; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_unsigned_v8f16_v8i32_duplicate:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: fcvtl v1.4s, v0.4h
+; CHECK-GI-NEXT: fcvtl2 v2.4s, v0.8h
+; CHECK-GI-NEXT: fcvtzu v0.4s, v1.4s
+; CHECK-GI-NEXT: fcvtzu v1.4s, v2.4s
+; CHECK-GI-NEXT: ret
%x = call <8 x i32> @llvm.fptoui.sat.v8f16.v8i32(<8 x half> %f)
ret <8 x i32> %x
}

define <8 x i50> @test_unsigned_v8f16_v8i50(<8 x half> %f) {
-; CHECK-CVT-LABEL: test_unsigned_v8f16_v8i50:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-CVT-NEXT: mov h5, v0.h[1]
-; CHECK-CVT-NEXT: mov x8, #1125899906842623 // =0x3ffffffffffff
-; CHECK-CVT-NEXT: mov h6, v0.h[2]
-; CHECK-CVT-NEXT: mov h7, v0.h[3]
-; CHECK-CVT-NEXT: fcvt s0, h0
-; CHECK-CVT-NEXT: mov h2, v1.h[1]
-; CHECK-CVT-NEXT: mov h3, v1.h[2]
-; CHECK-CVT-NEXT: mov h4, v1.h[3]
-; CHECK-CVT-NEXT: fcvt s1, h1
-; CHECK-CVT-NEXT: fcvtzu x13, s0
-; CHECK-CVT-NEXT: fcvt s2, h2
-; CHECK-CVT-NEXT: fcvt s3, h3
-; CHECK-CVT-NEXT: fcvt s4, h4
-; CHECK-CVT-NEXT: fcvtzu x9, s1
-; CHECK-CVT-NEXT: fcvt s1, h5
-; CHECK-CVT-NEXT: fcvtzu x10, s2
-; CHECK-CVT-NEXT: fcvtzu x11, s3
-; CHECK-CVT-NEXT: fcvt s2, h6
-; CHECK-CVT-NEXT: fcvtzu x12, s4
-; CHECK-CVT-NEXT: fcvt s3, h7
-; CHECK-CVT-NEXT: cmp x9, x8
-; CHECK-CVT-NEXT: fcvtzu x14, s1
-; CHECK-CVT-NEXT: csel x4, x9, x8, lo
-; CHECK-CVT-NEXT: cmp x10, x8
-; CHECK-CVT-NEXT: fcvtzu x9, s2
-; CHECK-CVT-NEXT: csel x5, x10, x8, lo
-; CHECK-CVT-NEXT: cmp x11, x8
-; CHECK-CVT-NEXT: fcvtzu x10, s3
-; CHECK-CVT-NEXT: csel x6, x11, x8, lo
-; CHECK-CVT-NEXT: cmp x12, x8
-; CHECK-CVT-NEXT: csel x7, x12, x8, lo
-; CHECK-CVT-NEXT: cmp x13, x8
-; CHECK-CVT-NEXT: csel x0, x13, x8, lo
-; CHECK-CVT-NEXT: cmp x14, x8
-; CHECK-CVT-NEXT: csel x1, x14, x8, lo
-; CHECK-CVT-NEXT: cmp x9, x8
-; CHECK-CVT-NEXT: csel x2, x9, x8, lo
-; CHECK-CVT-NEXT: cmp x10, x8
-; CHECK-CVT-NEXT: csel x3, x10, x8, lo
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_v8f16_v8i50:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-FP16-NEXT: mov x8, #1125899906842623 // =0x3ffffffffffff
-; CHECK-FP16-NEXT: fcvtzu x13, h0
-; CHECK-FP16-NEXT: mov h2, v1.h[1]
-; CHECK-FP16-NEXT: mov h3, v1.h[2]
-; CHECK-FP16-NEXT: mov h4, v1.h[3]
-; CHECK-FP16-NEXT: fcvtzu x9, h1
-; CHECK-FP16-NEXT: mov h1, v0.h[1]
-; CHECK-FP16-NEXT: fcvtzu x10, h2
-; CHECK-FP16-NEXT: fcvtzu x11, h3
-; CHECK-FP16-NEXT: mov h2, v0.h[2]
-; CHECK-FP16-NEXT: fcvtzu x12, h4
-; CHECK-FP16-NEXT: mov h3, v0.h[3]
-; CHECK-FP16-NEXT: cmp x9, x8
-; CHECK-FP16-NEXT: fcvtzu x14, h1
-; CHECK-FP16-NEXT: csel x4, x9, x8, lo
-; CHECK-FP16-NEXT: cmp x10, x8
-; CHECK-FP16-NEXT: fcvtzu x9, h2
-; CHECK-FP16-NEXT: csel x5, x10, x8, lo
-; CHECK-FP16-NEXT: cmp x11, x8
-; CHECK-FP16-NEXT: fcvtzu x10, h3
-; CHECK-FP16-NEXT: csel x6, x11, x8, lo
-; CHECK-FP16-NEXT: cmp x12, x8
-; CHECK-FP16-NEXT: csel x7, x12, x8, lo
-; CHECK-FP16-NEXT: cmp x13, x8
-; CHECK-FP16-NEXT: csel x0, x13, x8, lo
-; CHECK-FP16-NEXT: cmp x14, x8
-; CHECK-FP16-NEXT: csel x1, x14, x8, lo
-; CHECK-FP16-NEXT: cmp x9, x8
-; CHECK-FP16-NEXT: csel x2, x9, x8, lo
-; CHECK-FP16-NEXT: cmp x10, x8
-; CHECK-FP16-NEXT: csel x3, x10, x8, lo
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_v8f16_v8i50:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-CVT-NEXT: mov h5, v0.h[1]
+; CHECK-SD-CVT-NEXT: mov x8, #1125899906842623 // =0x3ffffffffffff
+; CHECK-SD-CVT-NEXT: mov h6, v0.h[2]
+; CHECK-SD-CVT-NEXT: mov h7, v0.h[3]
+; CHECK-SD-CVT-NEXT: fcvt s0, h0
+; CHECK-SD-CVT-NEXT: mov h2, v1.h[1]
+; CHECK-SD-CVT-NEXT: mov h3, v1.h[2]
+; CHECK-SD-CVT-NEXT: mov h4, v1.h[3]
+; CHECK-SD-CVT-NEXT: fcvt s1, h1
+; CHECK-SD-CVT-NEXT: fcvtzu x13, s0
+; CHECK-SD-CVT-NEXT: fcvt s2, h2
+; CHECK-SD-CVT-NEXT: fcvt s3, h3
+; CHECK-SD-CVT-NEXT: fcvt s4, h4
+; CHECK-SD-CVT-NEXT: fcvtzu x9, s1
+; CHECK-SD-CVT-NEXT: fcvt s1, h5
+; CHECK-SD-CVT-NEXT: fcvtzu x10, s2
+; CHECK-SD-CVT-NEXT: fcvtzu x11, s3
+; CHECK-SD-CVT-NEXT: fcvt s2, h6
+; CHECK-SD-CVT-NEXT: fcvtzu x12, s4
+; CHECK-SD-CVT-NEXT: fcvt s3, h7
+; CHECK-SD-CVT-NEXT: cmp x9, x8
+; CHECK-SD-CVT-NEXT: fcvtzu x14, s1
+; CHECK-SD-CVT-NEXT: csel x4, x9, x8, lo
+; CHECK-SD-CVT-NEXT: cmp x10, x8
+; CHECK-SD-CVT-NEXT: fcvtzu x9, s2
+; CHECK-SD-CVT-NEXT: csel x5, x10, x8, lo
+; CHECK-SD-CVT-NEXT: cmp x11, x8
+; CHECK-SD-CVT-NEXT: fcvtzu x10, s3
+; CHECK-SD-CVT-NEXT: csel x6, x11, x8, lo
+; CHECK-SD-CVT-NEXT: cmp x12, x8
+; CHECK-SD-CVT-NEXT: csel x7, x12, x8, lo
+; CHECK-SD-CVT-NEXT: cmp x13, x8
+; CHECK-SD-CVT-NEXT: csel x0, x13, x8, lo
+; CHECK-SD-CVT-NEXT: cmp x14, x8
+; CHECK-SD-CVT-NEXT: csel x1, x14, x8, lo
+; CHECK-SD-CVT-NEXT: cmp x9, x8
+; CHECK-SD-CVT-NEXT: csel x2, x9, x8, lo
+; CHECK-SD-CVT-NEXT: cmp x10, x8
+; CHECK-SD-CVT-NEXT: csel x3, x10, x8, lo
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_v8f16_v8i50:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-FP16-NEXT: mov x8, #1125899906842623 // =0x3ffffffffffff
+; CHECK-SD-FP16-NEXT: fcvtzu x13, h0
+; CHECK-SD-FP16-NEXT: mov h2, v1.h[1]
+; CHECK-SD-FP16-NEXT: mov h3, v1.h[2]
+; CHECK-SD-FP16-NEXT: mov h4, v1.h[3]
+; CHECK-SD-FP16-NEXT: fcvtzu x9, h1
+; CHECK-SD-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-SD-FP16-NEXT: fcvtzu x10, h2
+; CHECK-SD-FP16-NEXT: fcvtzu x11, h3
+; CHECK-SD-FP16-NEXT: mov h2, v0.h[2]
+; CHECK-SD-FP16-NEXT: fcvtzu x12, h4
+; CHECK-SD-FP16-NEXT: mov h3, v0.h[3]
+; CHECK-SD-FP16-NEXT: cmp x9, x8
+; CHECK-SD-FP16-NEXT: fcvtzu x14, h1
+; CHECK-SD-FP16-NEXT: csel x4, x9, x8, lo
+; CHECK-SD-FP16-NEXT: cmp x10, x8
+; CHECK-SD-FP16-NEXT: fcvtzu x9, h2
+; CHECK-SD-FP16-NEXT: csel x5, x10, x8, lo
+; CHECK-SD-FP16-NEXT: cmp x11, x8
+; CHECK-SD-FP16-NEXT: fcvtzu x10, h3
+; CHECK-SD-FP16-NEXT: csel x6, x11, x8, lo
+; CHECK-SD-FP16-NEXT: cmp x12, x8
+; CHECK-SD-FP16-NEXT: csel x7, x12, x8, lo
+; CHECK-SD-FP16-NEXT: cmp x13, x8
+; CHECK-SD-FP16-NEXT: csel x0, x13, x8, lo
+; CHECK-SD-FP16-NEXT: cmp x14, x8
+; CHECK-SD-FP16-NEXT: csel x1, x14, x8, lo
+; CHECK-SD-FP16-NEXT: cmp x9, x8
+; CHECK-SD-FP16-NEXT: csel x2, x9, x8, lo
+; CHECK-SD-FP16-NEXT: cmp x10, x8
+; CHECK-SD-FP16-NEXT: csel x3, x10, x8, lo
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_v8f16_v8i50:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-GI-CVT-NEXT: mov h5, v0.h[1]
+; CHECK-GI-CVT-NEXT: mov x8, #1125899906842623 // =0x3ffffffffffff
+; CHECK-GI-CVT-NEXT: mov h6, v0.h[2]
+; CHECK-GI-CVT-NEXT: mov h7, v0.h[3]
+; CHECK-GI-CVT-NEXT: fcvt s0, h0
+; CHECK-GI-CVT-NEXT: mov h2, v1.h[1]
+; CHECK-GI-CVT-NEXT: mov h3, v1.h[2]
+; CHECK-GI-CVT-NEXT: mov h4, v1.h[3]
+; CHECK-GI-CVT-NEXT: fcvt s1, h1
+; CHECK-GI-CVT-NEXT: fcvtzu x13, s0
+; CHECK-GI-CVT-NEXT: fcvt s2, h2
+; CHECK-GI-CVT-NEXT: fcvt s3, h3
+; CHECK-GI-CVT-NEXT: fcvt s4, h4
+; CHECK-GI-CVT-NEXT: fcvtzu x9, s1
+; CHECK-GI-CVT-NEXT: fcvt s1, h5
+; CHECK-GI-CVT-NEXT: fcvtzu x10, s2
+; CHECK-GI-CVT-NEXT: fcvtzu x11, s3
+; CHECK-GI-CVT-NEXT: fcvt s2, h6
+; CHECK-GI-CVT-NEXT: fcvtzu x12, s4
+; CHECK-GI-CVT-NEXT: fcvt s3, h7
+; CHECK-GI-CVT-NEXT: cmp x9, x8
+; CHECK-GI-CVT-NEXT: fcvtzu x14, s1
+; CHECK-GI-CVT-NEXT: csel x4, x9, x8, lo
+; CHECK-GI-CVT-NEXT: cmp x10, x8
+; CHECK-GI-CVT-NEXT: fcvtzu x9, s2
+; CHECK-GI-CVT-NEXT: csel x5, x10, x8, lo
+; CHECK-GI-CVT-NEXT: cmp x11, x8
+; CHECK-GI-CVT-NEXT: fcvtzu x10, s3
+; CHECK-GI-CVT-NEXT: csel x6, x11, x8, lo
+; CHECK-GI-CVT-NEXT: cmp x12, x8
+; CHECK-GI-CVT-NEXT: csel x7, x12, x8, lo
+; CHECK-GI-CVT-NEXT: cmp x13, x8
+; CHECK-GI-CVT-NEXT: csel x0, x13, x8, lo
+; CHECK-GI-CVT-NEXT: cmp x14, x8
+; CHECK-GI-CVT-NEXT: csel x1, x14, x8, lo
+; CHECK-GI-CVT-NEXT: cmp x9, x8
+; CHECK-GI-CVT-NEXT: csel x2, x9, x8, lo
+; CHECK-GI-CVT-NEXT: cmp x10, x8
+; CHECK-GI-CVT-NEXT: csel x3, x10, x8, lo
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_v8f16_v8i50:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-GI-FP16-NEXT: mov x8, #1125899906842623 // =0x3ffffffffffff
+; CHECK-GI-FP16-NEXT: fcvtzu x13, h0
+; CHECK-GI-FP16-NEXT: mov h2, v1.h[1]
+; CHECK-GI-FP16-NEXT: mov h3, v1.h[2]
+; CHECK-GI-FP16-NEXT: mov h4, v1.h[3]
+; CHECK-GI-FP16-NEXT: fcvtzu x9, h1
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: fcvtzu x10, h2
+; CHECK-GI-FP16-NEXT: fcvtzu x11, h3
+; CHECK-GI-FP16-NEXT: mov h2, v0.h[2]
+; CHECK-GI-FP16-NEXT: fcvtzu x12, h4
+; CHECK-GI-FP16-NEXT: mov h3, v0.h[3]
+; CHECK-GI-FP16-NEXT: cmp x9, x8
+; CHECK-GI-FP16-NEXT: fcvtzu x14, h1
+; CHECK-GI-FP16-NEXT: csel x4, x9, x8, lo
+; CHECK-GI-FP16-NEXT: cmp x10, x8
+; CHECK-GI-FP16-NEXT: fcvtzu x9, h2
+; CHECK-GI-FP16-NEXT: csel x5, x10, x8, lo
+; CHECK-GI-FP16-NEXT: cmp x11, x8
+; CHECK-GI-FP16-NEXT: fcvtzu x10, h3
+; CHECK-GI-FP16-NEXT: csel x6, x11, x8, lo
+; CHECK-GI-FP16-NEXT: cmp x12, x8
+; CHECK-GI-FP16-NEXT: csel x7, x12, x8, lo
+; CHECK-GI-FP16-NEXT: cmp x13, x8
+; CHECK-GI-FP16-NEXT: csel x0, x13, x8, lo
+; CHECK-GI-FP16-NEXT: cmp x14, x8
+; CHECK-GI-FP16-NEXT: csel x1, x14, x8, lo
+; CHECK-GI-FP16-NEXT: cmp x9, x8
+; CHECK-GI-FP16-NEXT: csel x2, x9, x8, lo
+; CHECK-GI-FP16-NEXT: cmp x10, x8
+; CHECK-GI-FP16-NEXT: csel x3, x10, x8, lo
+; CHECK-GI-FP16-NEXT: ret
%x = call <8 x i50> @llvm.fptoui.sat.v8f16.v8i50(<8 x half> %f)
ret <8 x i50> %x
}

define <8 x i64> @test_unsigned_v8f16_v8i64(<8 x half> %f) {
-; CHECK-CVT-LABEL: test_unsigned_v8f16_v8i64:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-CVT-NEXT: mov h4, v0.h[2]
-; CHECK-CVT-NEXT: mov h3, v0.h[1]
-; CHECK-CVT-NEXT: mov h7, v0.h[3]
-; CHECK-CVT-NEXT: fcvt s0, h0
-; CHECK-CVT-NEXT: mov h2, v1.h[2]
-; CHECK-CVT-NEXT: mov h5, v1.h[1]
-; CHECK-CVT-NEXT: mov h6, v1.h[3]
-; CHECK-CVT-NEXT: fcvt s1, h1
-; CHECK-CVT-NEXT: fcvt s4, h4
-; CHECK-CVT-NEXT: fcvt s3, h3
-; CHECK-CVT-NEXT: fcvt s7, h7
-; CHECK-CVT-NEXT: fcvtzu x9, s0
-; CHECK-CVT-NEXT: fcvt s2, h2
-; CHECK-CVT-NEXT: fcvt s5, h5
-; CHECK-CVT-NEXT: fcvt s6, h6
-; CHECK-CVT-NEXT: fcvtzu x8, s1
-; CHECK-CVT-NEXT: fcvtzu x12, s4
-; CHECK-CVT-NEXT: fcvtzu x11, s3
-; CHECK-CVT-NEXT: fcvtzu x15, s7
-; CHECK-CVT-NEXT: fmov d0, x9
-; CHECK-CVT-NEXT: fcvtzu x10, s2
-; CHECK-CVT-NEXT: fcvtzu x13, s5
-; CHECK-CVT-NEXT: fcvtzu x14, s6
-; CHECK-CVT-NEXT: fmov d2, x8
-; CHECK-CVT-NEXT: fmov d1, x12
-; CHECK-CVT-NEXT: mov v0.d[1], x11
-; CHECK-CVT-NEXT: fmov d3, x10
-; CHECK-CVT-NEXT: mov v2.d[1], x13
-; CHECK-CVT-NEXT: mov v1.d[1], x15
-; CHECK-CVT-NEXT: mov v3.d[1], x14
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: test_unsigned_v8f16_v8i64:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-FP16-NEXT: mov h4, v0.h[2]
-; CHECK-FP16-NEXT: mov h3, v0.h[1]
-; CHECK-FP16-NEXT: mov h7, v0.h[3]
-; CHECK-FP16-NEXT: fcvtzu x9, h0
-; CHECK-FP16-NEXT: mov h2, v1.h[2]
-; CHECK-FP16-NEXT: mov h5, v1.h[1]
-; CHECK-FP16-NEXT: mov h6, v1.h[3]
-; CHECK-FP16-NEXT: fcvtzu x8, h1
-; CHECK-FP16-NEXT: fcvtzu x12, h4
-; CHECK-FP16-NEXT: fcvtzu x11, h3
-; CHECK-FP16-NEXT: fcvtzu x15, h7
-; CHECK-FP16-NEXT: fmov d0, x9
-; CHECK-FP16-NEXT: fcvtzu x10, h2
-; CHECK-FP16-NEXT: fcvtzu x13, h5
-; CHECK-FP16-NEXT: fcvtzu x14, h6
-; CHECK-FP16-NEXT: fmov d2, x8
-; CHECK-FP16-NEXT: fmov d1, x12
-; CHECK-FP16-NEXT: mov v0.d[1], x11
-; CHECK-FP16-NEXT: fmov d3, x10
-; CHECK-FP16-NEXT: mov v2.d[1], x13
-; CHECK-FP16-NEXT: mov v1.d[1], x15
-; CHECK-FP16-NEXT: mov v3.d[1], x14
-; CHECK-FP16-NEXT: ret
+; CHECK-SD-CVT-LABEL: test_unsigned_v8f16_v8i64:
+; CHECK-SD-CVT: // %bb.0:
+; CHECK-SD-CVT-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-CVT-NEXT: mov h4, v0.h[2]
+; CHECK-SD-CVT-NEXT: mov h3, v0.h[1]
+; CHECK-SD-CVT-NEXT: mov h7, v0.h[3]
+; CHECK-SD-CVT-NEXT: fcvt s0, h0
+; CHECK-SD-CVT-NEXT: mov h2, v1.h[2]
+; CHECK-SD-CVT-NEXT: mov h5, v1.h[1]
+; CHECK-SD-CVT-NEXT: mov h6, v1.h[3]
+; CHECK-SD-CVT-NEXT: fcvt s1, h1
+; CHECK-SD-CVT-NEXT: fcvt s4, h4
+; CHECK-SD-CVT-NEXT: fcvt s3, h3
+; CHECK-SD-CVT-NEXT: fcvt s7, h7
+; CHECK-SD-CVT-NEXT: fcvtzu x9, s0
+; CHECK-SD-CVT-NEXT: fcvt s2, h2
+; CHECK-SD-CVT-NEXT: fcvt s5, h5
+; CHECK-SD-CVT-NEXT: fcvt s6, h6
+; CHECK-SD-CVT-NEXT: fcvtzu x8, s1
+; CHECK-SD-CVT-NEXT: fcvtzu x12, s4
+; CHECK-SD-CVT-NEXT: fcvtzu x11, s3
+; CHECK-SD-CVT-NEXT: fcvtzu x15, s7
+; CHECK-SD-CVT-NEXT: fmov d0, x9
+; CHECK-SD-CVT-NEXT: fcvtzu x10, s2
+; CHECK-SD-CVT-NEXT: fcvtzu x13, s5
+; CHECK-SD-CVT-NEXT: fcvtzu x14, s6
+; CHECK-SD-CVT-NEXT: fmov d2, x8
+; CHECK-SD-CVT-NEXT: fmov d1, x12
+; CHECK-SD-CVT-NEXT: mov v0.d[1], x11
+; CHECK-SD-CVT-NEXT: fmov d3, x10
+; CHECK-SD-CVT-NEXT: mov v2.d[1], x13
+; CHECK-SD-CVT-NEXT: mov v1.d[1], x15
+; CHECK-SD-CVT-NEXT: mov v3.d[1], x14
+; CHECK-SD-CVT-NEXT: ret
+;
+; CHECK-SD-FP16-LABEL: test_unsigned_v8f16_v8i64:
+; CHECK-SD-FP16: // %bb.0:
+; CHECK-SD-FP16-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-FP16-NEXT: mov h4, v0.h[2]
+; CHECK-SD-FP16-NEXT: mov h3, v0.h[1]
+; CHECK-SD-FP16-NEXT: mov h7, v0.h[3]
+; CHECK-SD-FP16-NEXT: fcvtzu x9, h0
+; CHECK-SD-FP16-NEXT: mov h2, v1.h[2]
+; CHECK-SD-FP16-NEXT: mov h5, v1.h[1]
+; CHECK-SD-FP16-NEXT: mov h6, v1.h[3]
+; CHECK-SD-FP16-NEXT: fcvtzu x8, h1
+; CHECK-SD-FP16-NEXT: fcvtzu x12, h4
+; CHECK-SD-FP16-NEXT: fcvtzu x11, h3
+; CHECK-SD-FP16-NEXT: fcvtzu x15, h7
+; CHECK-SD-FP16-NEXT: fmov d0, x9
+; CHECK-SD-FP16-NEXT: fcvtzu x10, h2
+; CHECK-SD-FP16-NEXT: fcvtzu x13, h5
+; CHECK-SD-FP16-NEXT: fcvtzu x14, h6
+; CHECK-SD-FP16-NEXT: fmov d2, x8
+; CHECK-SD-FP16-NEXT: fmov d1, x12
+; CHECK-SD-FP16-NEXT: mov v0.d[1], x11
+; CHECK-SD-FP16-NEXT: fmov d3, x10
+; CHECK-SD-FP16-NEXT: mov v2.d[1], x13
+; CHECK-SD-FP16-NEXT: mov v1.d[1], x15
+; CHECK-SD-FP16-NEXT: mov v3.d[1], x14
+; CHECK-SD-FP16-NEXT: ret
+;
+; CHECK-GI-CVT-LABEL: test_unsigned_v8f16_v8i64:
+; CHECK-GI-CVT: // %bb.0:
+; CHECK-GI-CVT-NEXT: fcvtl v1.4s, v0.4h
+; CHECK-GI-CVT-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-GI-CVT-NEXT: fcvtl v2.2d, v1.2s
+; CHECK-GI-CVT-NEXT: fcvtl2 v1.2d, v1.4s
+; CHECK-GI-CVT-NEXT: fcvtl v3.2d, v0.2s
+; CHECK-GI-CVT-NEXT: fcvtl2 v4.2d, v0.4s
+; CHECK-GI-CVT-NEXT: fcvtzu v0.2d, v2.2d
+; CHECK-GI-CVT-NEXT: fcvtzu v1.2d, v1.2d
+; CHECK-GI-CVT-NEXT: fcvtzu v2.2d, v3.2d
+; CHECK-GI-CVT-NEXT: fcvtzu v3.2d, v4.2d
+; CHECK-GI-CVT-NEXT: ret
+;
+; CHECK-GI-FP16-LABEL: test_unsigned_v8f16_v8i64:
+; CHECK-GI-FP16: // %bb.0:
+; CHECK-GI-FP16-NEXT: mov s1, v0.s[1]
+; CHECK-GI-FP16-NEXT: mov s2, v0.s[2]
+; CHECK-GI-FP16-NEXT: mov s3, v0.s[3]
+; CHECK-GI-FP16-NEXT: mov h4, v0.h[1]
+; CHECK-GI-FP16-NEXT: fcvt d0, h0
+; CHECK-GI-FP16-NEXT: mov h5, v1.h[1]
+; CHECK-GI-FP16-NEXT: mov h6, v2.h[1]
+; CHECK-GI-FP16-NEXT: mov h7, v3.h[1]
+; CHECK-GI-FP16-NEXT: fcvt d4, h4
+; CHECK-GI-FP16-NEXT: fcvt d1, h1
+; CHECK-GI-FP16-NEXT: fcvt d2, h2
+; CHECK-GI-FP16-NEXT: fcvt d3, h3
+; CHECK-GI-FP16-NEXT: fcvt d5, h5
+; CHECK-GI-FP16-NEXT: fcvt d6, h6
+; CHECK-GI-FP16-NEXT: fcvt d7, h7
+; CHECK-GI-FP16-NEXT: mov v0.d[1], v4.d[0]
+; CHECK-GI-FP16-NEXT: mov v1.d[1], v5.d[0]
+; CHECK-GI-FP16-NEXT: mov v2.d[1], v6.d[0]
+; CHECK-GI-FP16-NEXT: mov v3.d[1], v7.d[0]
+; CHECK-GI-FP16-NEXT: fcvtzu v0.2d, v0.2d
+; CHECK-GI-FP16-NEXT: fcvtzu v1.2d, v1.2d
+; CHECK-GI-FP16-NEXT: fcvtzu v2.2d, v2.2d
+; CHECK-GI-FP16-NEXT: fcvtzu v3.2d, v3.2d
+; CHECK-GI-FP16-NEXT: ret
%x = call <8 x i64> @llvm.fptoui.sat.v8f16.v8i64(<8 x half> %f)
ret <8 x i64> %x
}

define <8 x i100> @test_unsigned_v8f16_v8i100(<8 x half> %f) {
-; CHECK-LABEL: test_unsigned_v8f16_v8i100:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #176
-; CHECK-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
-; CHECK-NEXT: stp x29, x30, [sp, #80] // 16-byte Folded Spill
-; CHECK-NEXT: stp x28, x27, [sp, #96] // 16-byte Folded Spill
-; CHECK-NEXT: stp x26, x25, [sp, #112] // 16-byte Folded Spill
-; CHECK-NEXT: stp x24, x23, [sp, #128] // 16-byte Folded Spill
-; CHECK-NEXT: stp x22, x21, [sp, #144] // 16-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #160] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 176
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w21, -24
-; CHECK-NEXT: .cfi_offset w22, -32
-; CHECK-NEXT: .cfi_offset w23, -40
-; CHECK-NEXT: .cfi_offset w24, -48
-; CHECK-NEXT: .cfi_offset w25, -56
-; CHECK-NEXT: .cfi_offset w26, -64
-; CHECK-NEXT: .cfi_offset w27, -72
-; CHECK-NEXT: .cfi_offset w28, -80
-; CHECK-NEXT: .cfi_offset w30, -88
-; CHECK-NEXT: .cfi_offset w29, -96
-; CHECK-NEXT: .cfi_offset b8, -104
-; CHECK-NEXT: .cfi_offset b9, -112
-; CHECK-NEXT: str q0, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: mov x19, x8
-; CHECK-NEXT: str q0, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT: mov h0, v0.h[1]
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: mov w8, #1904214015 // =0x717fffff
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: fmov s9, w8
-; CHECK-NEXT: mov x23, #68719476735 // =0xfffffffff
-; CHECK-NEXT: mov h0, v0.h[3]
-; CHECK-NEXT: csel x9, xzr, x0, lt
-; CHECK-NEXT: csel x8, xzr, x1, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: csel x10, x23, x8, gt
-; CHECK-NEXT: csinv x8, x9, xzr, le
-; CHECK-NEXT: stp x8, x10, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: csel x8, xzr, x0, lt
-; CHECK-NEXT: csel x9, xzr, x1, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: csel x9, x23, x9, gt
-; CHECK-NEXT: csinv x8, x8, xzr, le
-; CHECK-NEXT: stp x8, x9, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: mov h0, v0.h[2]
-; CHECK-NEXT: csel x8, xzr, x0, lt
-; CHECK-NEXT: csel x9, xzr, x1, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: csinv x8, x8, xzr, le
-; CHECK-NEXT: csel x25, x23, x9, gt
-; CHECK-NEXT: str x8, [sp, #32] // 8-byte Folded Spill
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: mov h0, v0.h[1]
-; CHECK-NEXT: csel x8, xzr, x0, lt
-; CHECK-NEXT: csel x9, xzr, x1, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: csel x26, x23, x9, gt
-; CHECK-NEXT: csinv x28, x8, xzr, le
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: mov h0, v0.h[3]
-; CHECK-NEXT: csel x8, xzr, x0, lt
-; CHECK-NEXT: csel x9, xzr, x1, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: csel x29, x23, x9, gt
-; CHECK-NEXT: csinv x20, x8, xzr, le
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: csel x8, xzr, x0, lt
-; CHECK-NEXT: csel x9, xzr, x1, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: csel x21, x23, x9, gt
-; CHECK-NEXT: csinv x27, x8, xzr, le
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: mov h0, v0.h[2]
-; CHECK-NEXT: csel x8, xzr, x0, lt
-; CHECK-NEXT: csel x9, xzr, x1, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: fcvt s8, h0
-; CHECK-NEXT: csel x22, x23, x9, gt
-; CHECK-NEXT: csinv x24, x8, xzr, le
-; CHECK-NEXT: fmov s0, s8
-; CHECK-NEXT: bl __fixunssfti
-; CHECK-NEXT: extr x8, x21, x27, #28
-; CHECK-NEXT: extr x9, x29, x20, #28
-; CHECK-NEXT: stur x28, [x19, #75]
-; CHECK-NEXT: fcmp s8, #0.0
-; CHECK-NEXT: bfi x22, x20, #36, #28
-; CHECK-NEXT: lsr x11, x29, #28
-; CHECK-NEXT: stur x8, [x19, #41]
-; CHECK-NEXT: str x9, [x19, #16]
-; CHECK-NEXT: ldr x10, [sp, #32] // 8-byte Folded Reload
-; CHECK-NEXT: csel x8, xzr, x0, lt
-; CHECK-NEXT: csel x9, xzr, x1, lt
-; CHECK-NEXT: fcmp s8, s9
-; CHECK-NEXT: stp x24, x22, [x19]
-; CHECK-NEXT: stur x10, [x19, #50]
-; CHECK-NEXT: lsr x10, x21, #28
-; CHECK-NEXT: strb w11, [x19, #24]
-; CHECK-NEXT: strb w10, [x19, #49]
-; CHECK-NEXT: csel x9, x23, x9, gt
-; CHECK-NEXT: csinv x8, x8, xzr, le
-; CHECK-NEXT: ldp x12, x11, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: bfi x9, x27, #36, #28
-; CHECK-NEXT: stur x8, [x19, #25]
-; CHECK-NEXT: stur x9, [x19, #33]
-; CHECK-NEXT: extr x10, x11, x12, #28
-; CHECK-NEXT: bfi x26, x12, #36, #28
-; CHECK-NEXT: stur x10, [x19, #91]
-; CHECK-NEXT: ldp x10, x9, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: stur x26, [x19, #83]
-; CHECK-NEXT: extr x8, x9, x10, #28
-; CHECK-NEXT: bfi x25, x10, #36, #28
-; CHECK-NEXT: lsr x9, x9, #28
-; CHECK-NEXT: stur x8, [x19, #66]
-; CHECK-NEXT: lsr x8, x11, #28
-; CHECK-NEXT: stur x25, [x19, #58]
-; CHECK-NEXT: strb w8, [x19, #99]
-; CHECK-NEXT: strb w9, [x19, #74]
-; CHECK-NEXT: ldp x20, x19, [sp, #160] // 16-byte Folded Reload
-; CHECK-NEXT: ldp x22, x21, [sp, #144] // 16-byte Folded Reload
-; CHECK-NEXT: ldp x24, x23, [sp, #128] // 16-byte Folded Reload
-; CHECK-NEXT: ldp x26, x25, [sp, #112] // 16-byte Folded Reload
-; CHECK-NEXT: ldp x28, x27, [sp, #96] // 16-byte Folded Reload
-; CHECK-NEXT: ldp x29, x30, [sp, #80] // 16-byte Folded Reload
-; CHECK-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT: add sp, sp, #176
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_unsigned_v8f16_v8i100:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #176
+; CHECK-SD-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x29, x30, [sp, #80] // 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x28, x27, [sp, #96] // 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x26, x25, [sp, #112] // 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x24, x23, [sp, #128] // 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x22, x21, [sp, #144] // 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x20, x19, [sp, #160] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 176
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w20, -16
+; CHECK-SD-NEXT: .cfi_offset w21, -24
+; CHECK-SD-NEXT: .cfi_offset w22, -32
+; CHECK-SD-NEXT: .cfi_offset w23, -40
+; CHECK-SD-NEXT: .cfi_offset w24, -48
+; CHECK-SD-NEXT: .cfi_offset w25, -56
+; CHECK-SD-NEXT: .cfi_offset w26, -64
+; CHECK-SD-NEXT: .cfi_offset w27, -72
+; CHECK-SD-NEXT: .cfi_offset w28, -80
+; CHECK-SD-NEXT: .cfi_offset w30, -88
+; CHECK-SD-NEXT: .cfi_offset w29, -96
+; CHECK-SD-NEXT: .cfi_offset b8, -104
+; CHECK-SD-NEXT: .cfi_offset b9, -112
+; CHECK-SD-NEXT: str q0, [sp, #48] // 16-byte Folded Spill
+; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT: mov x19, x8
+; CHECK-SD-NEXT: str q0, [sp, #32] // 16-byte Folded Spill
+; CHECK-SD-NEXT: mov h0, v0.h[1]
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov w8, #1904214015 // =0x717fffff
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: fmov s9, w8
+; CHECK-SD-NEXT: mov x23, #68719476735 // =0xfffffffff
+; CHECK-SD-NEXT: mov h0, v0.h[3]
+; CHECK-SD-NEXT: csel x9, xzr, x0, lt
+; CHECK-SD-NEXT: csel x8, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: csel x10, x23, x8, gt
+; CHECK-SD-NEXT: csinv x8, x9, xzr, le
+; CHECK-SD-NEXT: stp x8, x10, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
+; CHECK-SD-NEXT: csel x8, xzr, x0, lt
+; CHECK-SD-NEXT: csel x9, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: csel x9, x23, x9, gt
+; CHECK-SD-NEXT: csinv x8, x8, xzr, le
+; CHECK-SD-NEXT: stp x8, x9, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: mov h0, v0.h[2]
+; CHECK-SD-NEXT: csel x8, xzr, x0, lt
+; CHECK-SD-NEXT: csel x9, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: csinv x8, x8, xzr, le
+; CHECK-SD-NEXT: csel x25, x23, x9, gt
+; CHECK-SD-NEXT: str x8, [sp, #32] // 8-byte Folded Spill
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: mov h0, v0.h[1]
+; CHECK-SD-NEXT: csel x8, xzr, x0, lt
+; CHECK-SD-NEXT: csel x9, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: csel x26, x23, x9, gt
+; CHECK-SD-NEXT: csinv x28, x8, xzr, le
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: mov h0, v0.h[3]
+; CHECK-SD-NEXT: csel x8, xzr, x0, lt
+; CHECK-SD-NEXT: csel x9, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: csel x29, x23, x9, gt
+; CHECK-SD-NEXT: csinv x20, x8, xzr, le
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-NEXT: csel x8, xzr, x0, lt
+; CHECK-SD-NEXT: csel x9, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: csel x21, x23, x9, gt
+; CHECK-SD-NEXT: csinv x27, x8, xzr, le
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: mov h0, v0.h[2]
+; CHECK-SD-NEXT: csel x8, xzr, x0, lt
+; CHECK-SD-NEXT: csel x9, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: fcvt s8, h0
+; CHECK-SD-NEXT: csel x22, x23, x9, gt
+; CHECK-SD-NEXT: csinv x24, x8, xzr, le
+; CHECK-SD-NEXT: fmov s0, s8
+; CHECK-SD-NEXT: bl __fixunssfti
+; CHECK-SD-NEXT: extr x8, x21, x27, #28
+; CHECK-SD-NEXT: extr x9, x29, x20, #28
+; CHECK-SD-NEXT: stur x28, [x19, #75]
+; CHECK-SD-NEXT: fcmp s8, #0.0
+; CHECK-SD-NEXT: bfi x22, x20, #36, #28
+; CHECK-SD-NEXT: lsr x11, x29, #28
+; CHECK-SD-NEXT: stur x8, [x19, #41]
+; CHECK-SD-NEXT: str x9, [x19, #16]
+; CHECK-SD-NEXT: ldr x10, [sp, #32] // 8-byte Folded Reload
+; CHECK-SD-NEXT: csel x8, xzr, x0, lt
+; CHECK-SD-NEXT: csel x9, xzr, x1, lt
+; CHECK-SD-NEXT: fcmp s8, s9
+; CHECK-SD-NEXT: stp x24, x22, [x19]
+; CHECK-SD-NEXT: stur x10, [x19, #50]
+; CHECK-SD-NEXT: lsr x10, x21, #28
+; CHECK-SD-NEXT: strb w11, [x19, #24]
+; CHECK-SD-NEXT: strb w10, [x19, #49]
+; CHECK-SD-NEXT: csel x9, x23, x9, gt
+; CHECK-SD-NEXT: csinv x8, x8, xzr, le
+; CHECK-SD-NEXT: ldp x12, x11, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: bfi x9, x27, #36, #28
+; CHECK-SD-NEXT: stur x8, [x19, #25]
+; CHECK-SD-NEXT: stur x9, [x19, #33]
+; CHECK-SD-NEXT: extr x10, x11, x12, #28
+; CHECK-SD-NEXT: bfi x26, x12, #36, #28
+; CHECK-SD-NEXT: stur x10, [x19, #91]
+; CHECK-SD-NEXT: ldp x10, x9, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: stur x26, [x19, #83]
+; CHECK-SD-NEXT: extr x8, x9, x10, #28
+; CHECK-SD-NEXT: bfi x25, x10,
#36, #28 +; CHECK-SD-NEXT: lsr x9, x9, #28 +; CHECK-SD-NEXT: stur x8, [x19, #66] +; CHECK-SD-NEXT: lsr x8, x11, #28 +; CHECK-SD-NEXT: stur x25, [x19, #58] +; CHECK-SD-NEXT: strb w8, [x19, #99] +; CHECK-SD-NEXT: strb w9, [x19, #74] +; CHECK-SD-NEXT: ldp x20, x19, [sp, #160] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x22, x21, [sp, #144] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x24, x23, [sp, #128] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x26, x25, [sp, #112] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x28, x27, [sp, #96] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp x29, x30, [sp, #80] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: add sp, sp, #176 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_unsigned_v8f16_v8i100: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: mov h1, v0.h[1] +; CHECK-GI-CVT-NEXT: mov h2, v0.h[2] +; CHECK-GI-CVT-NEXT: mov x11, x8 +; CHECK-GI-CVT-NEXT: fcvt s3, h0 +; CHECK-GI-CVT-NEXT: mov h4, v0.h[3] +; CHECK-GI-CVT-NEXT: str wzr, [x8, #8] +; CHECK-GI-CVT-NEXT: strb wzr, [x8, #12] +; CHECK-GI-CVT-NEXT: fcvt s1, h1 +; CHECK-GI-CVT-NEXT: fcvt s2, h2 +; CHECK-GI-CVT-NEXT: fcvtzu x9, s3 +; CHECK-GI-CVT-NEXT: fcvt s3, h4 +; CHECK-GI-CVT-NEXT: fcvtzu x10, s1 +; CHECK-GI-CVT-NEXT: mov h1, v0.h[4] +; CHECK-GI-CVT-NEXT: fcvtzu x12, s2 +; CHECK-GI-CVT-NEXT: mov h2, v0.h[5] +; CHECK-GI-CVT-NEXT: str x9, [x8] +; CHECK-GI-CVT-NEXT: mov x9, x8 +; CHECK-GI-CVT-NEXT: fcvt s1, h1 +; CHECK-GI-CVT-NEXT: str x10, [x11, #12]! +; CHECK-GI-CVT-NEXT: fcvtzu x10, s3 +; CHECK-GI-CVT-NEXT: mov h3, v0.h[6] +; CHECK-GI-CVT-NEXT: fcvt s2, h2 +; CHECK-GI-CVT-NEXT: mov h0, v0.h[7] +; CHECK-GI-CVT-NEXT: str wzr, [x11, #8] +; CHECK-GI-CVT-NEXT: strb wzr, [x11, #12] +; CHECK-GI-CVT-NEXT: mov x11, x8 +; CHECK-GI-CVT-NEXT: str x12, [x9, #25]! +; CHECK-GI-CVT-NEXT: fcvtzu x12, s1 +; CHECK-GI-CVT-NEXT: str wzr, [x9, #8] +; CHECK-GI-CVT-NEXT: fcvt s1, h3 +; CHECK-GI-CVT-NEXT: strb wzr, [x9, #12] +; CHECK-GI-CVT-NEXT: fcvt s0, h0 +; CHECK-GI-CVT-NEXT: mov x9, x8 +; CHECK-GI-CVT-NEXT: str x10, [x11, #37]! +; CHECK-GI-CVT-NEXT: fcvtzu x10, s2 +; CHECK-GI-CVT-NEXT: str wzr, [x11, #8] +; CHECK-GI-CVT-NEXT: strb wzr, [x11, #12] +; CHECK-GI-CVT-NEXT: fcvtzu x11, s1 +; CHECK-GI-CVT-NEXT: str x12, [x9, #50]! +; CHECK-GI-CVT-NEXT: str wzr, [x9, #8] +; CHECK-GI-CVT-NEXT: strb wzr, [x9, #12] +; CHECK-GI-CVT-NEXT: mov x9, x8 +; CHECK-GI-CVT-NEXT: str x10, [x9, #62]! +; CHECK-GI-CVT-NEXT: fcvtzu x10, s0 +; CHECK-GI-CVT-NEXT: str wzr, [x9, #8] +; CHECK-GI-CVT-NEXT: strb wzr, [x9, #12] +; CHECK-GI-CVT-NEXT: mov x9, x8 +; CHECK-GI-CVT-NEXT: str x11, [x9, #75]! +; CHECK-GI-CVT-NEXT: str wzr, [x9, #8] +; CHECK-GI-CVT-NEXT: strb wzr, [x9, #12] +; CHECK-GI-CVT-NEXT: str x10, [x8, #87]! +; CHECK-GI-CVT-NEXT: str wzr, [x8, #8] +; CHECK-GI-CVT-NEXT: strb wzr, [x8, #12] +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_unsigned_v8f16_v8i100: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: mov h1, v0.h[1] +; CHECK-GI-FP16-NEXT: mov h2, v0.h[2] +; CHECK-GI-FP16-NEXT: mov x11, x8 +; CHECK-GI-FP16-NEXT: fcvtzu x9, h0 +; CHECK-GI-FP16-NEXT: str wzr, [x8, #8] +; CHECK-GI-FP16-NEXT: strb wzr, [x8, #12] +; CHECK-GI-FP16-NEXT: fcvtzu x10, h1 +; CHECK-GI-FP16-NEXT: mov h1, v0.h[3] +; CHECK-GI-FP16-NEXT: fcvtzu x12, h2 +; CHECK-GI-FP16-NEXT: mov h2, v0.h[4] +; CHECK-GI-FP16-NEXT: str x9, [x8] +; CHECK-GI-FP16-NEXT: mov x9, x8 +; CHECK-GI-FP16-NEXT: str x10, [x11, #12]! 
+; CHECK-GI-FP16-NEXT: fcvtzu x10, h1 +; CHECK-GI-FP16-NEXT: mov h1, v0.h[5] +; CHECK-GI-FP16-NEXT: str wzr, [x11, #8] +; CHECK-GI-FP16-NEXT: strb wzr, [x11, #12] +; CHECK-GI-FP16-NEXT: mov x11, x8 +; CHECK-GI-FP16-NEXT: str x12, [x9, #25]! +; CHECK-GI-FP16-NEXT: fcvtzu x12, h2 +; CHECK-GI-FP16-NEXT: str wzr, [x9, #8] +; CHECK-GI-FP16-NEXT: mov h2, v0.h[6] +; CHECK-GI-FP16-NEXT: mov h0, v0.h[7] +; CHECK-GI-FP16-NEXT: strb wzr, [x9, #12] +; CHECK-GI-FP16-NEXT: fcvtzu x9, h1 +; CHECK-GI-FP16-NEXT: str x10, [x11, #37]! +; CHECK-GI-FP16-NEXT: mov x10, x8 +; CHECK-GI-FP16-NEXT: str wzr, [x11, #8] +; CHECK-GI-FP16-NEXT: strb wzr, [x11, #12] +; CHECK-GI-FP16-NEXT: fcvtzu x11, h2 +; CHECK-GI-FP16-NEXT: str x12, [x10, #50]! +; CHECK-GI-FP16-NEXT: str wzr, [x10, #8] +; CHECK-GI-FP16-NEXT: strb wzr, [x10, #12] +; CHECK-GI-FP16-NEXT: mov x10, x8 +; CHECK-GI-FP16-NEXT: str x9, [x10, #62]! +; CHECK-GI-FP16-NEXT: fcvtzu x9, h0 +; CHECK-GI-FP16-NEXT: str wzr, [x10, #8] +; CHECK-GI-FP16-NEXT: strb wzr, [x10, #12] +; CHECK-GI-FP16-NEXT: mov x10, x8 +; CHECK-GI-FP16-NEXT: str x11, [x10, #75]! +; CHECK-GI-FP16-NEXT: str wzr, [x10, #8] +; CHECK-GI-FP16-NEXT: strb wzr, [x10, #12] +; CHECK-GI-FP16-NEXT: str x9, [x8, #87]! +; CHECK-GI-FP16-NEXT: str wzr, [x8, #8] +; CHECK-GI-FP16-NEXT: strb wzr, [x8, #12] +; CHECK-GI-FP16-NEXT: ret %x = call <8 x i100> @llvm.fptoui.sat.v8f16.v8i100(<8 x half> %f) ret <8 x i100> %x } @@ -2305,64 +3892,116 @@ declare <16 x i8> @llvm.fptoui.sat.v16f64.v16i8(<16 x double> %f) declare <16 x i16> @llvm.fptoui.sat.v16f64.v16i16(<16 x double> %f) define <8 x i8> @test_unsigned_v8f32_v8i8(<8 x float> %f) { -; CHECK-LABEL: test_unsigned_v8f32_v8i8: -; CHECK: // %bb.0: -; CHECK-NEXT: movi v2.2d, #0x0000ff000000ff -; CHECK-NEXT: fcvtzu v1.4s, v1.4s -; CHECK-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-NEXT: umin v1.4s, v1.4s, v2.4s -; CHECK-NEXT: umin v0.4s, v0.4s, v2.4s -; CHECK-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-NEXT: xtn v0.8b, v0.8h -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v8f32_v8i8: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: movi v2.2d, #0x0000ff000000ff +; CHECK-SD-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-SD-NEXT: umin v1.4s, v1.4s, v2.4s +; CHECK-SD-NEXT: umin v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v8f32_v8i8: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v2.2d, #0x0000ff000000ff +; CHECK-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-GI-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-GI-NEXT: umin v0.4s, v0.4s, v2.4s +; CHECK-GI-NEXT: umin v1.4s, v1.4s, v2.4s +; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-GI-NEXT: ret %x = call <8 x i8> @llvm.fptoui.sat.v8f32.v8i8(<8 x float> %f) ret <8 x i8> %x } define <16 x i8> @test_unsigned_v16f32_v16i8(<16 x float> %f) { -; CHECK-LABEL: test_unsigned_v16f32_v16i8: -; CHECK: // %bb.0: -; CHECK-NEXT: movi v4.2d, #0x0000ff000000ff -; CHECK-NEXT: fcvtzu v3.4s, v3.4s -; CHECK-NEXT: fcvtzu v2.4s, v2.4s -; CHECK-NEXT: fcvtzu v1.4s, v1.4s -; CHECK-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-NEXT: umin v3.4s, v3.4s, v4.4s -; CHECK-NEXT: umin v2.4s, v2.4s, v4.4s -; CHECK-NEXT: umin v1.4s, v1.4s, v4.4s -; CHECK-NEXT: umin v0.4s, v0.4s, v4.4s -; CHECK-NEXT: uzp1 v2.8h, v2.8h, v3.8h -; CHECK-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-NEXT: uzp1 v0.16b, v0.16b, v2.16b -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v16f32_v16i8: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: movi v4.2d, 
#0x0000ff000000ff +; CHECK-SD-NEXT: fcvtzu v3.4s, v3.4s +; CHECK-SD-NEXT: fcvtzu v2.4s, v2.4s +; CHECK-SD-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-SD-NEXT: umin v3.4s, v3.4s, v4.4s +; CHECK-SD-NEXT: umin v2.4s, v2.4s, v4.4s +; CHECK-SD-NEXT: umin v1.4s, v1.4s, v4.4s +; CHECK-SD-NEXT: umin v0.4s, v0.4s, v4.4s +; CHECK-SD-NEXT: uzp1 v2.8h, v2.8h, v3.8h +; CHECK-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-SD-NEXT: uzp1 v0.16b, v0.16b, v2.16b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v16f32_v16i8: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v4.2d, #0x0000ff000000ff +; CHECK-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-GI-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-GI-NEXT: fcvtzu v2.4s, v2.4s +; CHECK-GI-NEXT: fcvtzu v3.4s, v3.4s +; CHECK-GI-NEXT: umin v0.4s, v0.4s, v4.4s +; CHECK-GI-NEXT: umin v1.4s, v1.4s, v4.4s +; CHECK-GI-NEXT: umin v2.4s, v2.4s, v4.4s +; CHECK-GI-NEXT: umin v3.4s, v3.4s, v4.4s +; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: uzp1 v1.8h, v2.8h, v3.8h +; CHECK-GI-NEXT: uzp1 v0.16b, v0.16b, v1.16b +; CHECK-GI-NEXT: ret %x = call <16 x i8> @llvm.fptoui.sat.v16f32.v16i8(<16 x float> %f) ret <16 x i8> %x } define <8 x i16> @test_unsigned_v8f32_v8i16(<8 x float> %f) { -; CHECK-LABEL: test_unsigned_v8f32_v8i16: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-NEXT: fcvtzu v1.4s, v1.4s -; CHECK-NEXT: uqxtn v0.4h, v0.4s -; CHECK-NEXT: uqxtn2 v0.8h, v1.4s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v8f32_v8i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-SD-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-SD-NEXT: uqxtn v0.4h, v0.4s +; CHECK-SD-NEXT: uqxtn2 v0.8h, v1.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v8f32_v8i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v2.2d, #0x00ffff0000ffff +; CHECK-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-GI-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-GI-NEXT: umin v0.4s, v0.4s, v2.4s +; CHECK-GI-NEXT: umin v1.4s, v1.4s, v2.4s +; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: ret %x = call <8 x i16> @llvm.fptoui.sat.v8f32.v8i16(<8 x float> %f) ret <8 x i16> %x } define <16 x i16> @test_unsigned_v16f32_v16i16(<16 x float> %f) { -; CHECK-LABEL: test_unsigned_v16f32_v16i16: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-NEXT: fcvtzu v2.4s, v2.4s -; CHECK-NEXT: fcvtzu v4.4s, v1.4s -; CHECK-NEXT: uqxtn v0.4h, v0.4s -; CHECK-NEXT: uqxtn v1.4h, v2.4s -; CHECK-NEXT: fcvtzu v2.4s, v3.4s -; CHECK-NEXT: uqxtn2 v0.8h, v4.4s -; CHECK-NEXT: uqxtn2 v1.8h, v2.4s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v16f32_v16i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-SD-NEXT: fcvtzu v2.4s, v2.4s +; CHECK-SD-NEXT: fcvtzu v4.4s, v1.4s +; CHECK-SD-NEXT: uqxtn v0.4h, v0.4s +; CHECK-SD-NEXT: uqxtn v1.4h, v2.4s +; CHECK-SD-NEXT: fcvtzu v2.4s, v3.4s +; CHECK-SD-NEXT: uqxtn2 v0.8h, v4.4s +; CHECK-SD-NEXT: uqxtn2 v1.8h, v2.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v16f32_v16i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v4.2d, #0x00ffff0000ffff +; CHECK-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-GI-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-GI-NEXT: fcvtzu v2.4s, v2.4s +; CHECK-GI-NEXT: fcvtzu v3.4s, v3.4s +; CHECK-GI-NEXT: umin v0.4s, v0.4s, v4.4s +; CHECK-GI-NEXT: umin v1.4s, v1.4s, v4.4s +; CHECK-GI-NEXT: umin v2.4s, v2.4s, v4.4s +; CHECK-GI-NEXT: umin v3.4s, v3.4s, v4.4s +; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: uzp1 v1.8h, v2.8h, v3.8h +; CHECK-GI-NEXT: ret %x = call <16 x i16> 
@llvm.fptoui.sat.v16f32.v16i16(<16 x float> %f) ret <16 x i16> %x } @@ -2370,344 +4009,511 @@ define <16 x i16> @test_unsigned_v16f32_v16i16(<16 x float> %f) { define <16 x i8> @test_unsigned_v16f16_v16i8(<16 x half> %f) { -; CHECK-CVT-LABEL: test_unsigned_v16f16_v16i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl2 v4.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: movi v2.2d, #0x0000ff000000ff -; CHECK-CVT-NEXT: fcvtzu v3.4s, v3.4s -; CHECK-CVT-NEXT: fcvtzu v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtzu v4.4s, v4.4s -; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-CVT-NEXT: umin v3.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: umin v1.4s, v1.4s, v2.4s -; CHECK-CVT-NEXT: umin v4.4s, v4.4s, v2.4s -; CHECK-CVT-NEXT: umin v0.4s, v0.4s, v2.4s -; CHECK-CVT-NEXT: uzp1 v1.8h, v1.8h, v3.8h -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v4.8h -; CHECK-CVT-NEXT: uzp1 v0.16b, v0.16b, v1.16b -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_unsigned_v16f16_v16i8: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzu v0.8h, v0.8h -; CHECK-FP16-NEXT: fcvtzu v1.8h, v1.8h -; CHECK-FP16-NEXT: uqxtn v0.8b, v0.8h -; CHECK-FP16-NEXT: uqxtn2 v0.16b, v1.8h -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_unsigned_v16f16_v16i8: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvtl2 v3.4s, v1.8h +; CHECK-SD-CVT-NEXT: fcvtl v1.4s, v1.4h +; CHECK-SD-CVT-NEXT: fcvtl2 v4.4s, v0.8h +; CHECK-SD-CVT-NEXT: fcvtl v0.4s, v0.4h +; CHECK-SD-CVT-NEXT: movi v2.2d, #0x0000ff000000ff +; CHECK-SD-CVT-NEXT: fcvtzu v3.4s, v3.4s +; CHECK-SD-CVT-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-SD-CVT-NEXT: fcvtzu v4.4s, v4.4s +; CHECK-SD-CVT-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-SD-CVT-NEXT: umin v3.4s, v3.4s, v2.4s +; CHECK-SD-CVT-NEXT: umin v1.4s, v1.4s, v2.4s +; CHECK-SD-CVT-NEXT: umin v4.4s, v4.4s, v2.4s +; CHECK-SD-CVT-NEXT: umin v0.4s, v0.4s, v2.4s +; CHECK-SD-CVT-NEXT: uzp1 v1.8h, v1.8h, v3.8h +; CHECK-SD-CVT-NEXT: uzp1 v0.8h, v0.8h, v4.8h +; CHECK-SD-CVT-NEXT: uzp1 v0.16b, v0.16b, v1.16b +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_unsigned_v16f16_v16i8: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzu v0.8h, v0.8h +; CHECK-SD-FP16-NEXT: fcvtzu v1.8h, v1.8h +; CHECK-SD-FP16-NEXT: uqxtn v0.8b, v0.8h +; CHECK-SD-FP16-NEXT: uqxtn2 v0.16b, v1.8h +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_unsigned_v16f16_v16i8: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvtl v3.4s, v0.4h +; CHECK-GI-CVT-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-GI-CVT-NEXT: fcvtl v4.4s, v1.4h +; CHECK-GI-CVT-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-GI-CVT-NEXT: movi v2.2d, #0x0000ff000000ff +; CHECK-GI-CVT-NEXT: fcvtzu v3.4s, v3.4s +; CHECK-GI-CVT-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-GI-CVT-NEXT: fcvtzu v4.4s, v4.4s +; CHECK-GI-CVT-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-GI-CVT-NEXT: umin v3.4s, v3.4s, v2.4s +; CHECK-GI-CVT-NEXT: umin v0.4s, v0.4s, v2.4s +; CHECK-GI-CVT-NEXT: umin v4.4s, v4.4s, v2.4s +; CHECK-GI-CVT-NEXT: umin v1.4s, v1.4s, v2.4s +; CHECK-GI-CVT-NEXT: uzp1 v0.8h, v3.8h, v0.8h +; CHECK-GI-CVT-NEXT: uzp1 v1.8h, v4.8h, v1.8h +; CHECK-GI-CVT-NEXT: uzp1 v0.16b, v0.16b, v1.16b +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_unsigned_v16f16_v16i8: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: movi v2.2d, #0xff00ff00ff00ff +; CHECK-GI-FP16-NEXT: fcvtzu v0.8h, v0.8h +; CHECK-GI-FP16-NEXT: fcvtzu v1.8h, v1.8h +; CHECK-GI-FP16-NEXT: umin v0.8h, v0.8h, v2.8h +; CHECK-GI-FP16-NEXT: umin v1.8h, v1.8h, v2.8h +; CHECK-GI-FP16-NEXT: uzp1 v0.16b, v0.16b, v1.16b +; 
CHECK-GI-FP16-NEXT: ret %x = call <16 x i8> @llvm.fptoui.sat.v16f16.v16i8(<16 x half> %f) ret <16 x i8> %x } define <16 x i16> @test_unsigned_v16f16_v16i16(<16 x half> %f) { -; CHECK-CVT-LABEL: test_unsigned_v16f16_v16i16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v2.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v3.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl2 v4.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl2 v5.4s, v1.8h -; CHECK-CVT-NEXT: fcvtzu v2.4s, v2.4s -; CHECK-CVT-NEXT: fcvtzu v1.4s, v3.4s -; CHECK-CVT-NEXT: fcvtzu v3.4s, v5.4s -; CHECK-CVT-NEXT: uqxtn v0.4h, v2.4s -; CHECK-CVT-NEXT: fcvtzu v2.4s, v4.4s -; CHECK-CVT-NEXT: uqxtn v1.4h, v1.4s -; CHECK-CVT-NEXT: uqxtn2 v0.8h, v2.4s -; CHECK-CVT-NEXT: uqxtn2 v1.8h, v3.4s -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: test_unsigned_v16f16_v16i16: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzu v0.8h, v0.8h -; CHECK-FP16-NEXT: fcvtzu v1.8h, v1.8h -; CHECK-FP16-NEXT: ret +; CHECK-SD-CVT-LABEL: test_unsigned_v16f16_v16i16: +; CHECK-SD-CVT: // %bb.0: +; CHECK-SD-CVT-NEXT: fcvtl v2.4s, v0.4h +; CHECK-SD-CVT-NEXT: fcvtl v3.4s, v1.4h +; CHECK-SD-CVT-NEXT: fcvtl2 v4.4s, v0.8h +; CHECK-SD-CVT-NEXT: fcvtl2 v5.4s, v1.8h +; CHECK-SD-CVT-NEXT: fcvtzu v2.4s, v2.4s +; CHECK-SD-CVT-NEXT: fcvtzu v1.4s, v3.4s +; CHECK-SD-CVT-NEXT: fcvtzu v3.4s, v5.4s +; CHECK-SD-CVT-NEXT: uqxtn v0.4h, v2.4s +; CHECK-SD-CVT-NEXT: fcvtzu v2.4s, v4.4s +; CHECK-SD-CVT-NEXT: uqxtn v1.4h, v1.4s +; CHECK-SD-CVT-NEXT: uqxtn2 v0.8h, v2.4s +; CHECK-SD-CVT-NEXT: uqxtn2 v1.8h, v3.4s +; CHECK-SD-CVT-NEXT: ret +; +; CHECK-SD-FP16-LABEL: test_unsigned_v16f16_v16i16: +; CHECK-SD-FP16: // %bb.0: +; CHECK-SD-FP16-NEXT: fcvtzu v0.8h, v0.8h +; CHECK-SD-FP16-NEXT: fcvtzu v1.8h, v1.8h +; CHECK-SD-FP16-NEXT: ret +; +; CHECK-GI-CVT-LABEL: test_unsigned_v16f16_v16i16: +; CHECK-GI-CVT: // %bb.0: +; CHECK-GI-CVT-NEXT: fcvtl v3.4s, v0.4h +; CHECK-GI-CVT-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-GI-CVT-NEXT: fcvtl v4.4s, v1.4h +; CHECK-GI-CVT-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-GI-CVT-NEXT: movi v2.2d, #0x00ffff0000ffff +; CHECK-GI-CVT-NEXT: fcvtzu v3.4s, v3.4s +; CHECK-GI-CVT-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-GI-CVT-NEXT: fcvtzu v4.4s, v4.4s +; CHECK-GI-CVT-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-GI-CVT-NEXT: umin v3.4s, v3.4s, v2.4s +; CHECK-GI-CVT-NEXT: umin v0.4s, v0.4s, v2.4s +; CHECK-GI-CVT-NEXT: umin v4.4s, v4.4s, v2.4s +; CHECK-GI-CVT-NEXT: umin v1.4s, v1.4s, v2.4s +; CHECK-GI-CVT-NEXT: uzp1 v0.8h, v3.8h, v0.8h +; CHECK-GI-CVT-NEXT: uzp1 v1.8h, v4.8h, v1.8h +; CHECK-GI-CVT-NEXT: ret +; +; CHECK-GI-FP16-LABEL: test_unsigned_v16f16_v16i16: +; CHECK-GI-FP16: // %bb.0: +; CHECK-GI-FP16-NEXT: fcvtzu v0.8h, v0.8h +; CHECK-GI-FP16-NEXT: fcvtzu v1.8h, v1.8h +; CHECK-GI-FP16-NEXT: ret %x = call <16 x i16> @llvm.fptoui.sat.v16f16.v16i16(<16 x half> %f) ret <16 x i16> %x } define <8 x i8> @test_unsigned_v8f64_v8i8(<8 x double> %f) { -; CHECK-LABEL: test_unsigned_v8f64_v8i8: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d4, v3.d[1] -; CHECK-NEXT: mov d5, v2.d[1] -; CHECK-NEXT: mov w11, #255 // =0xff -; CHECK-NEXT: fcvtzu w9, d3 -; CHECK-NEXT: mov d3, v1.d[1] -; CHECK-NEXT: fcvtzu w12, d2 -; CHECK-NEXT: fcvtzu w14, d1 -; CHECK-NEXT: fcvtzu w8, d4 -; CHECK-NEXT: mov d4, v0.d[1] -; CHECK-NEXT: fcvtzu w10, d5 -; CHECK-NEXT: fcvtzu w13, d3 -; CHECK-NEXT: cmp w8, #255 -; CHECK-NEXT: fcvtzu w15, d4 -; CHECK-NEXT: csel w8, w8, w11, lo -; CHECK-NEXT: cmp w9, #255 -; CHECK-NEXT: csel w9, w9, w11, lo -; CHECK-NEXT: cmp w10, #255 -; CHECK-NEXT: fmov s4, w9 -; CHECK-NEXT: csel w9, w10, w11, lo -; CHECK-NEXT: cmp w12, #255 -; CHECK-NEXT: fcvtzu w10, d0 -; 
CHECK-NEXT: mov v4.s[1], w8 -; CHECK-NEXT: csel w8, w12, w11, lo -; CHECK-NEXT: cmp w13, #255 -; CHECK-NEXT: fmov s3, w8 -; CHECK-NEXT: csel w8, w13, w11, lo -; CHECK-NEXT: cmp w14, #255 -; CHECK-NEXT: mov v3.s[1], w9 -; CHECK-NEXT: csel w9, w14, w11, lo -; CHECK-NEXT: cmp w15, #255 -; CHECK-NEXT: fmov s2, w9 -; CHECK-NEXT: csel w9, w15, w11, lo -; CHECK-NEXT: cmp w10, #255 -; CHECK-NEXT: mov v2.s[1], w8 -; CHECK-NEXT: csel w8, w10, w11, lo -; CHECK-NEXT: fmov s1, w8 -; CHECK-NEXT: adrp x8, .LCPI82_0 -; CHECK-NEXT: ldr d0, [x8, :lo12:.LCPI82_0] -; CHECK-NEXT: mov v1.s[1], w9 -; CHECK-NEXT: tbl v0.8b, { v1.16b, v2.16b, v3.16b, v4.16b }, v0.8b -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v8f64_v8i8: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d4, v3.d[1] +; CHECK-SD-NEXT: mov d5, v2.d[1] +; CHECK-SD-NEXT: mov w11, #255 // =0xff +; CHECK-SD-NEXT: fcvtzu w9, d3 +; CHECK-SD-NEXT: mov d3, v1.d[1] +; CHECK-SD-NEXT: fcvtzu w12, d2 +; CHECK-SD-NEXT: fcvtzu w14, d1 +; CHECK-SD-NEXT: fcvtzu w8, d4 +; CHECK-SD-NEXT: mov d4, v0.d[1] +; CHECK-SD-NEXT: fcvtzu w10, d5 +; CHECK-SD-NEXT: fcvtzu w13, d3 +; CHECK-SD-NEXT: cmp w8, #255 +; CHECK-SD-NEXT: fcvtzu w15, d4 +; CHECK-SD-NEXT: csel w8, w8, w11, lo +; CHECK-SD-NEXT: cmp w9, #255 +; CHECK-SD-NEXT: csel w9, w9, w11, lo +; CHECK-SD-NEXT: cmp w10, #255 +; CHECK-SD-NEXT: fmov s4, w9 +; CHECK-SD-NEXT: csel w9, w10, w11, lo +; CHECK-SD-NEXT: cmp w12, #255 +; CHECK-SD-NEXT: fcvtzu w10, d0 +; CHECK-SD-NEXT: mov v4.s[1], w8 +; CHECK-SD-NEXT: csel w8, w12, w11, lo +; CHECK-SD-NEXT: cmp w13, #255 +; CHECK-SD-NEXT: fmov s3, w8 +; CHECK-SD-NEXT: csel w8, w13, w11, lo +; CHECK-SD-NEXT: cmp w14, #255 +; CHECK-SD-NEXT: mov v3.s[1], w9 +; CHECK-SD-NEXT: csel w9, w14, w11, lo +; CHECK-SD-NEXT: cmp w15, #255 +; CHECK-SD-NEXT: fmov s2, w9 +; CHECK-SD-NEXT: csel w9, w15, w11, lo +; CHECK-SD-NEXT: cmp w10, #255 +; CHECK-SD-NEXT: mov v2.s[1], w8 +; CHECK-SD-NEXT: csel w8, w10, w11, lo +; CHECK-SD-NEXT: fmov s1, w8 +; CHECK-SD-NEXT: adrp x8, .LCPI82_0 +; CHECK-SD-NEXT: ldr d0, [x8, :lo12:.LCPI82_0] +; CHECK-SD-NEXT: mov v1.s[1], w9 +; CHECK-SD-NEXT: tbl v0.8b, { v1.16b, v2.16b, v3.16b, v4.16b }, v0.8b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v8f64_v8i8: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v4.2d, #0x000000000000ff +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: fcvtzu v1.2d, v1.2d +; CHECK-GI-NEXT: fcvtzu v2.2d, v2.2d +; CHECK-GI-NEXT: fcvtzu v3.2d, v3.2d +; CHECK-GI-NEXT: cmhi v5.2d, v4.2d, v0.2d +; CHECK-GI-NEXT: cmhi v6.2d, v4.2d, v1.2d +; CHECK-GI-NEXT: cmhi v7.2d, v4.2d, v2.2d +; CHECK-GI-NEXT: cmhi v16.2d, v4.2d, v3.2d +; CHECK-GI-NEXT: bif v0.16b, v4.16b, v5.16b +; CHECK-GI-NEXT: bif v1.16b, v4.16b, v6.16b +; CHECK-GI-NEXT: bif v2.16b, v4.16b, v7.16b +; CHECK-GI-NEXT: bif v3.16b, v4.16b, v16.16b +; CHECK-GI-NEXT: uzp1 v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: uzp1 v1.4s, v2.4s, v3.4s +; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-GI-NEXT: ret %x = call <8 x i8> @llvm.fptoui.sat.v8f64.v8i8(<8 x double> %f) ret <8 x i8> %x } define <16 x i8> @test_unsigned_v16f64_v16i8(<16 x double> %f) { -; CHECK-LABEL: test_unsigned_v16f64_v16i8: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d16, v0.d[1] -; CHECK-NEXT: fcvtzu w10, d0 -; CHECK-NEXT: mov w8, #255 // =0xff -; CHECK-NEXT: fcvtzu w9, d16 -; CHECK-NEXT: mov d16, v1.d[1] -; CHECK-NEXT: cmp w9, #255 -; CHECK-NEXT: csel w9, w9, w8, lo -; CHECK-NEXT: cmp w10, #255 -; CHECK-NEXT: csel w10, w10, w8, lo -; CHECK-NEXT: fmov s0, w10 -; CHECK-NEXT: fcvtzu w10, d16 
-; CHECK-NEXT: mov d16, v2.d[1] -; CHECK-NEXT: mov v0.s[1], w9 -; CHECK-NEXT: fcvtzu w9, d1 -; CHECK-NEXT: cmp w10, #255 -; CHECK-NEXT: csel w10, w10, w8, lo -; CHECK-NEXT: cmp w9, #255 -; CHECK-NEXT: mov w11, v0.s[1] -; CHECK-NEXT: csel w9, w9, w8, lo -; CHECK-NEXT: fmov s1, w9 -; CHECK-NEXT: fcvtzu w9, d16 -; CHECK-NEXT: mov d16, v3.d[1] -; CHECK-NEXT: mov v0.b[1], w11 -; CHECK-NEXT: mov v1.s[1], w10 -; CHECK-NEXT: fcvtzu w10, d2 -; CHECK-NEXT: cmp w9, #255 -; CHECK-NEXT: csel w9, w9, w8, lo -; CHECK-NEXT: cmp w10, #255 -; CHECK-NEXT: mov w11, v1.s[1] -; CHECK-NEXT: mov v0.b[2], v1.b[0] -; CHECK-NEXT: csel w10, w10, w8, lo -; CHECK-NEXT: fmov s2, w10 -; CHECK-NEXT: fcvtzu w10, d16 -; CHECK-NEXT: mov d16, v4.d[1] -; CHECK-NEXT: mov v0.b[3], w11 -; CHECK-NEXT: mov v2.s[1], w9 -; CHECK-NEXT: fcvtzu w9, d3 -; CHECK-NEXT: cmp w10, #255 -; CHECK-NEXT: csel w10, w10, w8, lo -; CHECK-NEXT: cmp w9, #255 -; CHECK-NEXT: mov w11, v2.s[1] -; CHECK-NEXT: mov v0.b[4], v2.b[0] -; CHECK-NEXT: csel w9, w9, w8, lo -; CHECK-NEXT: fmov s3, w9 -; CHECK-NEXT: fcvtzu w9, d16 -; CHECK-NEXT: mov d16, v5.d[1] -; CHECK-NEXT: mov v0.b[5], w11 -; CHECK-NEXT: mov v3.s[1], w10 -; CHECK-NEXT: fcvtzu w10, d4 -; CHECK-NEXT: cmp w9, #255 -; CHECK-NEXT: csel w9, w9, w8, lo -; CHECK-NEXT: cmp w10, #255 -; CHECK-NEXT: mov w11, v3.s[1] -; CHECK-NEXT: mov v0.b[6], v3.b[0] -; CHECK-NEXT: csel w10, w10, w8, lo -; CHECK-NEXT: fmov s4, w10 -; CHECK-NEXT: fcvtzu w10, d16 -; CHECK-NEXT: mov v0.b[7], w11 -; CHECK-NEXT: mov v4.s[1], w9 -; CHECK-NEXT: fcvtzu w9, d5 -; CHECK-NEXT: mov d5, v6.d[1] -; CHECK-NEXT: cmp w10, #255 -; CHECK-NEXT: csel w10, w10, w8, lo -; CHECK-NEXT: cmp w9, #255 -; CHECK-NEXT: mov w11, v4.s[1] -; CHECK-NEXT: mov v0.b[8], v4.b[0] -; CHECK-NEXT: csel w9, w9, w8, lo -; CHECK-NEXT: fmov s16, w9 -; CHECK-NEXT: fcvtzu w9, d5 -; CHECK-NEXT: mov d5, v7.d[1] -; CHECK-NEXT: mov v0.b[9], w11 -; CHECK-NEXT: mov v16.s[1], w10 -; CHECK-NEXT: fcvtzu w10, d6 -; CHECK-NEXT: cmp w9, #255 -; CHECK-NEXT: csel w9, w9, w8, lo -; CHECK-NEXT: cmp w10, #255 -; CHECK-NEXT: mov v0.b[10], v16.b[0] -; CHECK-NEXT: mov w11, v16.s[1] -; CHECK-NEXT: csel w10, w10, w8, lo -; CHECK-NEXT: fmov s6, w10 -; CHECK-NEXT: fcvtzu w10, d7 -; CHECK-NEXT: mov v0.b[11], w11 -; CHECK-NEXT: mov v6.s[1], w9 -; CHECK-NEXT: fcvtzu w9, d5 -; CHECK-NEXT: cmp w9, #255 -; CHECK-NEXT: mov v0.b[12], v6.b[0] -; CHECK-NEXT: mov w11, v6.s[1] -; CHECK-NEXT: csel w9, w9, w8, lo -; CHECK-NEXT: cmp w10, #255 -; CHECK-NEXT: csel w8, w10, w8, lo -; CHECK-NEXT: fmov s5, w8 -; CHECK-NEXT: mov v0.b[13], w11 -; CHECK-NEXT: mov v5.s[1], w9 -; CHECK-NEXT: mov v0.b[14], v5.b[0] -; CHECK-NEXT: mov w8, v5.s[1] -; CHECK-NEXT: mov v0.b[15], w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v16f64_v16i8: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d16, v0.d[1] +; CHECK-SD-NEXT: fcvtzu w10, d0 +; CHECK-SD-NEXT: mov w8, #255 // =0xff +; CHECK-SD-NEXT: fcvtzu w9, d16 +; CHECK-SD-NEXT: mov d16, v1.d[1] +; CHECK-SD-NEXT: cmp w9, #255 +; CHECK-SD-NEXT: csel w9, w9, w8, lo +; CHECK-SD-NEXT: cmp w10, #255 +; CHECK-SD-NEXT: csel w10, w10, w8, lo +; CHECK-SD-NEXT: fmov s0, w10 +; CHECK-SD-NEXT: fcvtzu w10, d16 +; CHECK-SD-NEXT: mov d16, v2.d[1] +; CHECK-SD-NEXT: mov v0.s[1], w9 +; CHECK-SD-NEXT: fcvtzu w9, d1 +; CHECK-SD-NEXT: cmp w10, #255 +; CHECK-SD-NEXT: csel w10, w10, w8, lo +; CHECK-SD-NEXT: cmp w9, #255 +; CHECK-SD-NEXT: mov w11, v0.s[1] +; CHECK-SD-NEXT: csel w9, w9, w8, lo +; CHECK-SD-NEXT: fmov s1, w9 +; CHECK-SD-NEXT: fcvtzu w9, d16 +; CHECK-SD-NEXT: mov d16, v3.d[1] +; 
CHECK-SD-NEXT: mov v0.b[1], w11 +; CHECK-SD-NEXT: mov v1.s[1], w10 +; CHECK-SD-NEXT: fcvtzu w10, d2 +; CHECK-SD-NEXT: cmp w9, #255 +; CHECK-SD-NEXT: csel w9, w9, w8, lo +; CHECK-SD-NEXT: cmp w10, #255 +; CHECK-SD-NEXT: mov w11, v1.s[1] +; CHECK-SD-NEXT: mov v0.b[2], v1.b[0] +; CHECK-SD-NEXT: csel w10, w10, w8, lo +; CHECK-SD-NEXT: fmov s2, w10 +; CHECK-SD-NEXT: fcvtzu w10, d16 +; CHECK-SD-NEXT: mov d16, v4.d[1] +; CHECK-SD-NEXT: mov v0.b[3], w11 +; CHECK-SD-NEXT: mov v2.s[1], w9 +; CHECK-SD-NEXT: fcvtzu w9, d3 +; CHECK-SD-NEXT: cmp w10, #255 +; CHECK-SD-NEXT: csel w10, w10, w8, lo +; CHECK-SD-NEXT: cmp w9, #255 +; CHECK-SD-NEXT: mov w11, v2.s[1] +; CHECK-SD-NEXT: mov v0.b[4], v2.b[0] +; CHECK-SD-NEXT: csel w9, w9, w8, lo +; CHECK-SD-NEXT: fmov s3, w9 +; CHECK-SD-NEXT: fcvtzu w9, d16 +; CHECK-SD-NEXT: mov d16, v5.d[1] +; CHECK-SD-NEXT: mov v0.b[5], w11 +; CHECK-SD-NEXT: mov v3.s[1], w10 +; CHECK-SD-NEXT: fcvtzu w10, d4 +; CHECK-SD-NEXT: cmp w9, #255 +; CHECK-SD-NEXT: csel w9, w9, w8, lo +; CHECK-SD-NEXT: cmp w10, #255 +; CHECK-SD-NEXT: mov w11, v3.s[1] +; CHECK-SD-NEXT: mov v0.b[6], v3.b[0] +; CHECK-SD-NEXT: csel w10, w10, w8, lo +; CHECK-SD-NEXT: fmov s4, w10 +; CHECK-SD-NEXT: fcvtzu w10, d16 +; CHECK-SD-NEXT: mov v0.b[7], w11 +; CHECK-SD-NEXT: mov v4.s[1], w9 +; CHECK-SD-NEXT: fcvtzu w9, d5 +; CHECK-SD-NEXT: mov d5, v6.d[1] +; CHECK-SD-NEXT: cmp w10, #255 +; CHECK-SD-NEXT: csel w10, w10, w8, lo +; CHECK-SD-NEXT: cmp w9, #255 +; CHECK-SD-NEXT: mov w11, v4.s[1] +; CHECK-SD-NEXT: mov v0.b[8], v4.b[0] +; CHECK-SD-NEXT: csel w9, w9, w8, lo +; CHECK-SD-NEXT: fmov s16, w9 +; CHECK-SD-NEXT: fcvtzu w9, d5 +; CHECK-SD-NEXT: mov d5, v7.d[1] +; CHECK-SD-NEXT: mov v0.b[9], w11 +; CHECK-SD-NEXT: mov v16.s[1], w10 +; CHECK-SD-NEXT: fcvtzu w10, d6 +; CHECK-SD-NEXT: cmp w9, #255 +; CHECK-SD-NEXT: csel w9, w9, w8, lo +; CHECK-SD-NEXT: cmp w10, #255 +; CHECK-SD-NEXT: mov v0.b[10], v16.b[0] +; CHECK-SD-NEXT: mov w11, v16.s[1] +; CHECK-SD-NEXT: csel w10, w10, w8, lo +; CHECK-SD-NEXT: fmov s6, w10 +; CHECK-SD-NEXT: fcvtzu w10, d7 +; CHECK-SD-NEXT: mov v0.b[11], w11 +; CHECK-SD-NEXT: mov v6.s[1], w9 +; CHECK-SD-NEXT: fcvtzu w9, d5 +; CHECK-SD-NEXT: cmp w9, #255 +; CHECK-SD-NEXT: mov v0.b[12], v6.b[0] +; CHECK-SD-NEXT: mov w11, v6.s[1] +; CHECK-SD-NEXT: csel w9, w9, w8, lo +; CHECK-SD-NEXT: cmp w10, #255 +; CHECK-SD-NEXT: csel w8, w10, w8, lo +; CHECK-SD-NEXT: fmov s5, w8 +; CHECK-SD-NEXT: mov v0.b[13], w11 +; CHECK-SD-NEXT: mov v5.s[1], w9 +; CHECK-SD-NEXT: mov v0.b[14], v5.b[0] +; CHECK-SD-NEXT: mov w8, v5.s[1] +; CHECK-SD-NEXT: mov v0.b[15], w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v16f64_v16i8: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: fcvtzu v1.2d, v1.2d +; CHECK-GI-NEXT: movi v16.2d, #0x000000000000ff +; CHECK-GI-NEXT: fcvtzu v2.2d, v2.2d +; CHECK-GI-NEXT: fcvtzu v3.2d, v3.2d +; CHECK-GI-NEXT: fcvtzu v4.2d, v4.2d +; CHECK-GI-NEXT: fcvtzu v5.2d, v5.2d +; CHECK-GI-NEXT: fcvtzu v6.2d, v6.2d +; CHECK-GI-NEXT: fcvtzu v7.2d, v7.2d +; CHECK-GI-NEXT: cmhi v17.2d, v16.2d, v0.2d +; CHECK-GI-NEXT: cmhi v18.2d, v16.2d, v1.2d +; CHECK-GI-NEXT: cmhi v19.2d, v16.2d, v2.2d +; CHECK-GI-NEXT: cmhi v20.2d, v16.2d, v3.2d +; CHECK-GI-NEXT: cmhi v21.2d, v16.2d, v4.2d +; CHECK-GI-NEXT: cmhi v22.2d, v16.2d, v5.2d +; CHECK-GI-NEXT: cmhi v23.2d, v16.2d, v6.2d +; CHECK-GI-NEXT: cmhi v24.2d, v16.2d, v7.2d +; CHECK-GI-NEXT: bif v0.16b, v16.16b, v17.16b +; CHECK-GI-NEXT: bif v1.16b, v16.16b, v18.16b +; CHECK-GI-NEXT: bif v2.16b, v16.16b, v19.16b +; CHECK-GI-NEXT: bif v3.16b, 
v16.16b, v20.16b +; CHECK-GI-NEXT: bif v4.16b, v16.16b, v21.16b +; CHECK-GI-NEXT: bif v5.16b, v16.16b, v22.16b +; CHECK-GI-NEXT: bif v6.16b, v16.16b, v23.16b +; CHECK-GI-NEXT: bif v7.16b, v16.16b, v24.16b +; CHECK-GI-NEXT: uzp1 v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: uzp1 v1.4s, v2.4s, v3.4s +; CHECK-GI-NEXT: uzp1 v2.4s, v4.4s, v5.4s +; CHECK-GI-NEXT: uzp1 v3.4s, v6.4s, v7.4s +; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: uzp1 v1.8h, v2.8h, v3.8h +; CHECK-GI-NEXT: uzp1 v0.16b, v0.16b, v1.16b +; CHECK-GI-NEXT: ret %x = call <16 x i8> @llvm.fptoui.sat.v16f64.v16i8(<16 x double> %f) ret <16 x i8> %x } define <8 x i16> @test_unsigned_v8f64_v8i16(<8 x double> %f) { -; CHECK-LABEL: test_unsigned_v8f64_v8i16: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d4, v3.d[1] -; CHECK-NEXT: mov d5, v2.d[1] -; CHECK-NEXT: mov w10, #65535 // =0xffff -; CHECK-NEXT: fcvtzu w9, d3 -; CHECK-NEXT: mov d3, v1.d[1] -; CHECK-NEXT: fcvtzu w12, d2 -; CHECK-NEXT: fcvtzu w14, d1 -; CHECK-NEXT: fcvtzu w8, d4 -; CHECK-NEXT: mov d4, v0.d[1] -; CHECK-NEXT: fcvtzu w11, d5 -; CHECK-NEXT: fcvtzu w13, d3 -; CHECK-NEXT: cmp w8, w10 -; CHECK-NEXT: fcvtzu w15, d4 -; CHECK-NEXT: csel w8, w8, w10, lo -; CHECK-NEXT: cmp w9, w10 -; CHECK-NEXT: csel w9, w9, w10, lo -; CHECK-NEXT: cmp w11, w10 -; CHECK-NEXT: fmov s4, w9 -; CHECK-NEXT: csel w9, w11, w10, lo -; CHECK-NEXT: cmp w12, w10 -; CHECK-NEXT: fcvtzu w11, d0 -; CHECK-NEXT: mov v4.s[1], w8 -; CHECK-NEXT: csel w8, w12, w10, lo -; CHECK-NEXT: cmp w13, w10 -; CHECK-NEXT: fmov s3, w8 -; CHECK-NEXT: csel w8, w13, w10, lo -; CHECK-NEXT: cmp w14, w10 -; CHECK-NEXT: mov v3.s[1], w9 -; CHECK-NEXT: csel w9, w14, w10, lo -; CHECK-NEXT: cmp w15, w10 -; CHECK-NEXT: fmov s2, w9 -; CHECK-NEXT: csel w9, w15, w10, lo -; CHECK-NEXT: cmp w11, w10 -; CHECK-NEXT: mov v2.s[1], w8 -; CHECK-NEXT: csel w8, w11, w10, lo -; CHECK-NEXT: fmov s1, w8 -; CHECK-NEXT: adrp x8, .LCPI84_0 -; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI84_0] -; CHECK-NEXT: mov v1.s[1], w9 -; CHECK-NEXT: tbl v0.16b, { v1.16b, v2.16b, v3.16b, v4.16b }, v0.16b -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_unsigned_v8f64_v8i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d4, v3.d[1] +; CHECK-SD-NEXT: mov d5, v2.d[1] +; CHECK-SD-NEXT: mov w10, #65535 // =0xffff +; CHECK-SD-NEXT: fcvtzu w9, d3 +; CHECK-SD-NEXT: mov d3, v1.d[1] +; CHECK-SD-NEXT: fcvtzu w12, d2 +; CHECK-SD-NEXT: fcvtzu w14, d1 +; CHECK-SD-NEXT: fcvtzu w8, d4 +; CHECK-SD-NEXT: mov d4, v0.d[1] +; CHECK-SD-NEXT: fcvtzu w11, d5 +; CHECK-SD-NEXT: fcvtzu w13, d3 +; CHECK-SD-NEXT: cmp w8, w10 +; CHECK-SD-NEXT: fcvtzu w15, d4 +; CHECK-SD-NEXT: csel w8, w8, w10, lo +; CHECK-SD-NEXT: cmp w9, w10 +; CHECK-SD-NEXT: csel w9, w9, w10, lo +; CHECK-SD-NEXT: cmp w11, w10 +; CHECK-SD-NEXT: fmov s4, w9 +; CHECK-SD-NEXT: csel w9, w11, w10, lo +; CHECK-SD-NEXT: cmp w12, w10 +; CHECK-SD-NEXT: fcvtzu w11, d0 +; CHECK-SD-NEXT: mov v4.s[1], w8 +; CHECK-SD-NEXT: csel w8, w12, w10, lo +; CHECK-SD-NEXT: cmp w13, w10 +; CHECK-SD-NEXT: fmov s3, w8 +; CHECK-SD-NEXT: csel w8, w13, w10, lo +; CHECK-SD-NEXT: cmp w14, w10 +; CHECK-SD-NEXT: mov v3.s[1], w9 +; CHECK-SD-NEXT: csel w9, w14, w10, lo +; CHECK-SD-NEXT: cmp w15, w10 +; CHECK-SD-NEXT: fmov s2, w9 +; CHECK-SD-NEXT: csel w9, w15, w10, lo +; CHECK-SD-NEXT: cmp w11, w10 +; CHECK-SD-NEXT: mov v2.s[1], w8 +; CHECK-SD-NEXT: csel w8, w11, w10, lo +; CHECK-SD-NEXT: fmov s1, w8 +; CHECK-SD-NEXT: adrp x8, .LCPI84_0 +; CHECK-SD-NEXT: ldr q0, [x8, :lo12:.LCPI84_0] +; CHECK-SD-NEXT: mov v1.s[1], w9 +; CHECK-SD-NEXT: tbl v0.16b, { v1.16b, v2.16b, v3.16b, v4.16b }, v0.16b 
+; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v8f64_v8i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v4.2d, #0x0000000000ffff +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: fcvtzu v1.2d, v1.2d +; CHECK-GI-NEXT: fcvtzu v2.2d, v2.2d +; CHECK-GI-NEXT: fcvtzu v3.2d, v3.2d +; CHECK-GI-NEXT: cmhi v5.2d, v4.2d, v0.2d +; CHECK-GI-NEXT: cmhi v6.2d, v4.2d, v1.2d +; CHECK-GI-NEXT: cmhi v7.2d, v4.2d, v2.2d +; CHECK-GI-NEXT: cmhi v16.2d, v4.2d, v3.2d +; CHECK-GI-NEXT: bif v0.16b, v4.16b, v5.16b +; CHECK-GI-NEXT: bif v1.16b, v4.16b, v6.16b +; CHECK-GI-NEXT: bif v2.16b, v4.16b, v7.16b +; CHECK-GI-NEXT: bif v3.16b, v4.16b, v16.16b +; CHECK-GI-NEXT: uzp1 v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: uzp1 v1.4s, v2.4s, v3.4s +; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: ret %x = call <8 x i16> @llvm.fptoui.sat.v8f64.v8i16(<8 x double> %f) ret <8 x i16> %x } define <16 x i16> @test_unsigned_v16f64_v16i16(<16 x double> %f) { -; CHECK-LABEL: test_unsigned_v16f64_v16i16: -; CHECK: // %bb.0: -; CHECK-NEXT: mov d16, v3.d[1] -; CHECK-NEXT: mov d17, v2.d[1] -; CHECK-NEXT: mov w8, #65535 // =0xffff -; CHECK-NEXT: fcvtzu w9, d3 -; CHECK-NEXT: mov d3, v1.d[1] -; CHECK-NEXT: fcvtzu w10, d1 -; CHECK-NEXT: mov d1, v0.d[1] -; CHECK-NEXT: fcvtzu w11, d2 -; CHECK-NEXT: fcvtzu w12, d0 -; CHECK-NEXT: mov d0, v7.d[1] -; CHECK-NEXT: mov d2, v6.d[1] -; CHECK-NEXT: fcvtzu w14, d7 -; CHECK-NEXT: fcvtzu w13, d16 -; CHECK-NEXT: fcvtzu w16, d17 -; CHECK-NEXT: fcvtzu w15, d6 -; CHECK-NEXT: fcvtzu w17, d3 -; CHECK-NEXT: mov d6, v5.d[1] -; CHECK-NEXT: mov d3, v4.d[1] -; CHECK-NEXT: fcvtzu w18, d1 -; CHECK-NEXT: cmp w13, w8 -; CHECK-NEXT: csel w13, w13, w8, lo -; CHECK-NEXT: cmp w9, w8 -; CHECK-NEXT: csel w9, w9, w8, lo -; CHECK-NEXT: cmp w16, w8 -; CHECK-NEXT: fmov s19, w9 -; CHECK-NEXT: csel w9, w16, w8, lo -; CHECK-NEXT: cmp w11, w8 -; CHECK-NEXT: fcvtzu w16, d0 -; CHECK-NEXT: csel w11, w11, w8, lo -; CHECK-NEXT: cmp w17, w8 -; CHECK-NEXT: mov v19.s[1], w13 -; CHECK-NEXT: csel w13, w17, w8, lo -; CHECK-NEXT: cmp w10, w8 -; CHECK-NEXT: csel w10, w10, w8, lo -; CHECK-NEXT: cmp w18, w8 -; CHECK-NEXT: fmov s18, w11 -; CHECK-NEXT: csel w11, w18, w8, lo -; CHECK-NEXT: cmp w12, w8 -; CHECK-NEXT: fcvtzu w17, d2 -; CHECK-NEXT: csel w12, w12, w8, lo -; CHECK-NEXT: cmp w16, w8 -; CHECK-NEXT: fcvtzu w18, d6 -; CHECK-NEXT: mov v18.s[1], w9 -; CHECK-NEXT: csel w9, w16, w8, lo -; CHECK-NEXT: cmp w14, w8 -; CHECK-NEXT: fmov s17, w10 -; CHECK-NEXT: csel w10, w14, w8, lo -; CHECK-NEXT: fcvtzu w16, d5 -; CHECK-NEXT: fmov s23, w10 -; CHECK-NEXT: cmp w17, w8 -; CHECK-NEXT: fcvtzu w14, d3 -; CHECK-NEXT: csel w10, w17, w8, lo -; CHECK-NEXT: cmp w15, w8 -; CHECK-NEXT: fcvtzu w17, d4 -; CHECK-NEXT: mov v17.s[1], w13 -; CHECK-NEXT: mov v23.s[1], w9 -; CHECK-NEXT: csel w9, w15, w8, lo -; CHECK-NEXT: cmp w18, w8 -; CHECK-NEXT: fmov s22, w9 -; CHECK-NEXT: csel w9, w18, w8, lo -; CHECK-NEXT: cmp w16, w8 -; CHECK-NEXT: fmov s16, w12 -; CHECK-NEXT: mov v22.s[1], w10 -; CHECK-NEXT: csel w10, w16, w8, lo -; CHECK-NEXT: cmp w14, w8 -; CHECK-NEXT: fmov s21, w10 -; CHECK-NEXT: csel w10, w14, w8, lo -; CHECK-NEXT: cmp w17, w8 -; CHECK-NEXT: csel w8, w17, w8, lo -; CHECK-NEXT: mov v16.s[1], w11 -; CHECK-NEXT: mov v21.s[1], w9 -; CHECK-NEXT: fmov s20, w8 -; CHECK-NEXT: adrp x8, .LCPI85_0 -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI85_0] -; CHECK-NEXT: mov v20.s[1], w10 -; CHECK-NEXT: tbl v0.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v1.16b -; CHECK-NEXT: tbl v1.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v1.16b -; CHECK-NEXT: ret +; 
CHECK-SD-LABEL: test_unsigned_v16f64_v16i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov d16, v3.d[1] +; CHECK-SD-NEXT: mov d17, v2.d[1] +; CHECK-SD-NEXT: mov w8, #65535 // =0xffff +; CHECK-SD-NEXT: fcvtzu w9, d3 +; CHECK-SD-NEXT: mov d3, v1.d[1] +; CHECK-SD-NEXT: fcvtzu w10, d1 +; CHECK-SD-NEXT: mov d1, v0.d[1] +; CHECK-SD-NEXT: fcvtzu w11, d2 +; CHECK-SD-NEXT: fcvtzu w12, d0 +; CHECK-SD-NEXT: mov d0, v7.d[1] +; CHECK-SD-NEXT: mov d2, v6.d[1] +; CHECK-SD-NEXT: fcvtzu w14, d7 +; CHECK-SD-NEXT: fcvtzu w13, d16 +; CHECK-SD-NEXT: fcvtzu w16, d17 +; CHECK-SD-NEXT: fcvtzu w15, d6 +; CHECK-SD-NEXT: fcvtzu w17, d3 +; CHECK-SD-NEXT: mov d6, v5.d[1] +; CHECK-SD-NEXT: mov d3, v4.d[1] +; CHECK-SD-NEXT: fcvtzu w18, d1 +; CHECK-SD-NEXT: cmp w13, w8 +; CHECK-SD-NEXT: csel w13, w13, w8, lo +; CHECK-SD-NEXT: cmp w9, w8 +; CHECK-SD-NEXT: csel w9, w9, w8, lo +; CHECK-SD-NEXT: cmp w16, w8 +; CHECK-SD-NEXT: fmov s19, w9 +; CHECK-SD-NEXT: csel w9, w16, w8, lo +; CHECK-SD-NEXT: cmp w11, w8 +; CHECK-SD-NEXT: fcvtzu w16, d0 +; CHECK-SD-NEXT: csel w11, w11, w8, lo +; CHECK-SD-NEXT: cmp w17, w8 +; CHECK-SD-NEXT: mov v19.s[1], w13 +; CHECK-SD-NEXT: csel w13, w17, w8, lo +; CHECK-SD-NEXT: cmp w10, w8 +; CHECK-SD-NEXT: csel w10, w10, w8, lo +; CHECK-SD-NEXT: cmp w18, w8 +; CHECK-SD-NEXT: fmov s18, w11 +; CHECK-SD-NEXT: csel w11, w18, w8, lo +; CHECK-SD-NEXT: cmp w12, w8 +; CHECK-SD-NEXT: fcvtzu w17, d2 +; CHECK-SD-NEXT: csel w12, w12, w8, lo +; CHECK-SD-NEXT: cmp w16, w8 +; CHECK-SD-NEXT: fcvtzu w18, d6 +; CHECK-SD-NEXT: mov v18.s[1], w9 +; CHECK-SD-NEXT: csel w9, w16, w8, lo +; CHECK-SD-NEXT: cmp w14, w8 +; CHECK-SD-NEXT: fmov s17, w10 +; CHECK-SD-NEXT: csel w10, w14, w8, lo +; CHECK-SD-NEXT: fcvtzu w16, d5 +; CHECK-SD-NEXT: fmov s23, w10 +; CHECK-SD-NEXT: cmp w17, w8 +; CHECK-SD-NEXT: fcvtzu w14, d3 +; CHECK-SD-NEXT: csel w10, w17, w8, lo +; CHECK-SD-NEXT: cmp w15, w8 +; CHECK-SD-NEXT: fcvtzu w17, d4 +; CHECK-SD-NEXT: mov v17.s[1], w13 +; CHECK-SD-NEXT: mov v23.s[1], w9 +; CHECK-SD-NEXT: csel w9, w15, w8, lo +; CHECK-SD-NEXT: cmp w18, w8 +; CHECK-SD-NEXT: fmov s22, w9 +; CHECK-SD-NEXT: csel w9, w18, w8, lo +; CHECK-SD-NEXT: cmp w16, w8 +; CHECK-SD-NEXT: fmov s16, w12 +; CHECK-SD-NEXT: mov v22.s[1], w10 +; CHECK-SD-NEXT: csel w10, w16, w8, lo +; CHECK-SD-NEXT: cmp w14, w8 +; CHECK-SD-NEXT: fmov s21, w10 +; CHECK-SD-NEXT: csel w10, w14, w8, lo +; CHECK-SD-NEXT: cmp w17, w8 +; CHECK-SD-NEXT: csel w8, w17, w8, lo +; CHECK-SD-NEXT: mov v16.s[1], w11 +; CHECK-SD-NEXT: mov v21.s[1], w9 +; CHECK-SD-NEXT: fmov s20, w8 +; CHECK-SD-NEXT: adrp x8, .LCPI85_0 +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI85_0] +; CHECK-SD-NEXT: mov v20.s[1], w10 +; CHECK-SD-NEXT: tbl v0.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v1.16b +; CHECK-SD-NEXT: tbl v1.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v1.16b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_unsigned_v16f64_v16i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fcvtzu v0.2d, v0.2d +; CHECK-GI-NEXT: fcvtzu v1.2d, v1.2d +; CHECK-GI-NEXT: movi v16.2d, #0x0000000000ffff +; CHECK-GI-NEXT: fcvtzu v2.2d, v2.2d +; CHECK-GI-NEXT: fcvtzu v3.2d, v3.2d +; CHECK-GI-NEXT: fcvtzu v4.2d, v4.2d +; CHECK-GI-NEXT: fcvtzu v5.2d, v5.2d +; CHECK-GI-NEXT: fcvtzu v6.2d, v6.2d +; CHECK-GI-NEXT: fcvtzu v7.2d, v7.2d +; CHECK-GI-NEXT: cmhi v17.2d, v16.2d, v0.2d +; CHECK-GI-NEXT: cmhi v18.2d, v16.2d, v1.2d +; CHECK-GI-NEXT: cmhi v19.2d, v16.2d, v2.2d +; CHECK-GI-NEXT: cmhi v20.2d, v16.2d, v3.2d +; CHECK-GI-NEXT: cmhi v21.2d, v16.2d, v4.2d +; CHECK-GI-NEXT: cmhi v22.2d, v16.2d, v5.2d +; CHECK-GI-NEXT: cmhi v23.2d, 
v16.2d, v6.2d
+; CHECK-GI-NEXT: cmhi v24.2d, v16.2d, v7.2d
+; CHECK-GI-NEXT: bif v0.16b, v16.16b, v17.16b
+; CHECK-GI-NEXT: bif v1.16b, v16.16b, v18.16b
+; CHECK-GI-NEXT: bif v2.16b, v16.16b, v19.16b
+; CHECK-GI-NEXT: bif v3.16b, v16.16b, v20.16b
+; CHECK-GI-NEXT: bif v4.16b, v16.16b, v21.16b
+; CHECK-GI-NEXT: bif v5.16b, v16.16b, v22.16b
+; CHECK-GI-NEXT: bif v6.16b, v16.16b, v23.16b
+; CHECK-GI-NEXT: bif v7.16b, v16.16b, v24.16b
+; CHECK-GI-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: uzp1 v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: uzp1 v2.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: uzp1 v3.4s, v6.4s, v7.4s
+; CHECK-GI-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: uzp1 v1.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: ret
   %x = call <16 x i16> @llvm.fptoui.sat.v16f64.v16i16(<16 x double> %f)
   ret <16 x i16> %x
 }
diff --git a/llvm/test/CodeGen/AArch64/init-undef.mir b/llvm/test/CodeGen/AArch64/init-undef.mir
new file mode 100644
index 0000000..7935c09
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/init-undef.mir
@@ -0,0 +1,27 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=aarch64-- -run-pass=init-undef -o - %s | FileCheck %s
+
+---
+name: test_stxp_undef
+body: |
+  bb.0:
+    liveins: $x0, $x1
+
+    ; CHECK-LABEL: name: test_stxp_undef
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr64 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INIT_UNDEF:%[0-9]+]]:gpr64 = INIT_UNDEF
+    ; CHECK-NEXT: early-clobber %3:gpr32 = STXPX killed [[INIT_UNDEF]], [[COPY]], [[COPY1]] :: (volatile store (s128))
+    ; CHECK-NEXT: $w0 = COPY %3
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %1:gpr64 = COPY $x1
+    %0:gpr64common = COPY $x0
+    %3:gpr64 = IMPLICIT_DEF
+    early-clobber %2:gpr32 = STXPX killed %3, %1, %0 :: (volatile store (s128))
+    $w0 = COPY %2
+    RET_ReallyLR implicit $w0
+
+...
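For context: the test_unsigned_* diffs above all exercise the llvm.fptoui.sat.* intrinsic family, which converts floating point to unsigned integer with saturation: NaN and negative inputs produce 0, inputs above the destination type's maximum produce the all-ones value, and everything in between truncates toward zero (hence the clamps against constants such as #255 and #65535 in the checked assembly). A minimal scalar sketch of the same intrinsic, assuming the standard @llvm.fptoui.sat.<dst>.<src> mangling; the function name is illustrative only and not part of the patch:

; Illustrative sketch: scalar form of the saturating conversion that the
; vector tests above exercise. Can be fed to llc, e.g.:
;   llc -mtriple=aarch64 sketch.ll -o -
define i8 @sat_u8_from_float(float %x) {
  ; NaN and negative inputs yield 0; inputs above 255.0 yield 255;
  ; everything else truncates toward zero.
  %r = call i8 @llvm.fptoui.sat.i8.f32(float %x)
  ret i8 %r
}
declare i8 @llvm.fptoui.sat.i8.f32(float)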
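The scmp.ll diff that follows makes two changes: the GlobalISel RUN line drops -global-isel-abort=2 (and the 2>&1 that captured its fallback warnings), so GlobalISel must now select every function itself rather than falling back to SelectionDAG, and new vector tests are added for llvm.scmp, the signed three-way compare, where each result element is -1 if the first operand is (signed) less than the second, 0 if they are equal, and 1 if it is greater. A minimal scalar sketch, assuming the @llvm.scmp.<result>.<operand> mangling visible in the @llvm.scmp.v16i8.v16i16 call below; the function name is illustrative only:

; Illustrative sketch: scalar three-way compare, matching the semantics
; the SelectionDAG lowering below implements with cmgt/and/orr.
define i32 @three_way_cmp(i32 %a, i32 %b) {
  ; %r is -1 when %a < %b (signed), 0 when %a == %b, 1 when %a > %b.
  %r = call i32 @llvm.scmp.i32.i32(i32 %a, i32 %b)
  ret i32 %r
}
declare i32 @llvm.scmp.i32.i32(i32, i32)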
diff --git a/llvm/test/CodeGen/AArch64/scmp.ll b/llvm/test/CodeGen/AArch64/scmp.ll
index bcad1c1..3d18a90 100644
--- a/llvm/test/CodeGen/AArch64/scmp.ll
+++ b/llvm/test/CodeGen/AArch64/scmp.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 define i8 @scmp.8.8(i8 %x, i8 %y) nounwind {
 ; CHECK-SD-LABEL: scmp.8.8:
@@ -132,3 +132,416 @@ define i64 @scmp.64.64(i64 %x, i64 %y) nounwind {
   %1 = call i64 @llvm.scmp(i64 %x, i64 %y)
   ret i64 %1
 }
+
+define <8 x i8> @s_v8i8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-SD-LABEL: s_v8i8:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v2.8b, #1
+; CHECK-SD-NEXT: cmgt v3.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT: cmgt v0.8b, v1.8b, v0.8b
+; CHECK-SD-NEXT: and v1.8b, v3.8b, v2.8b
+; CHECK-SD-NEXT: orr v0.8b, v1.8b, v0.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: s_v8i8:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v2.8b, #1
+; CHECK-GI-NEXT: cmgt v3.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: movi d4, #0xffffffffffffffff
+; CHECK-GI-NEXT: cmgt v0.8b, v1.8b, v0.8b
+; CHECK-GI-NEXT: and v2.8b, v2.8b, v3.8b
+; CHECK-GI-NEXT: bsl v0.8b, v4.8b, v2.8b
+; CHECK-GI-NEXT: ret
+entry:
+  %c = call <8 x i8> @llvm.scmp(<8 x i8> %a, <8 x i8> %b)
+  ret <8 x i8> %c
+}
+
+define <16 x i8> @s_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SD-LABEL: s_v16i8:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v2.16b, #1
+; CHECK-SD-NEXT: cmgt v3.16b, v0.16b, v1.16b
+; CHECK-SD-NEXT: cmgt v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: and v1.16b, v3.16b, v2.16b
+; CHECK-SD-NEXT: orr v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: s_v16i8:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v2.16b, #1
+; CHECK-GI-NEXT: cmgt v3.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT: movi v4.2d, #0xffffffffffffffff
+; CHECK-GI-NEXT: cmgt v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: and v2.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT: bsl v0.16b, v4.16b, v2.16b
+; CHECK-GI-NEXT: ret
+entry:
+  %c = call <16 x i8> @llvm.scmp(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %c
+}
+
+define <4 x i16> @s_v4i16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-SD-LABEL: s_v4i16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v2.4h, #1
+; CHECK-SD-NEXT: cmgt v3.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: cmgt v0.4h, v1.4h, v0.4h
+; CHECK-SD-NEXT: and v1.8b, v3.8b, v2.8b
+; CHECK-SD-NEXT: orr v0.8b, v1.8b, v0.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: s_v4i16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v2.4h, #1
+; CHECK-GI-NEXT: cmgt v3.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: movi d4, #0xffffffffffffffff
+; CHECK-GI-NEXT: cmgt v0.4h, v1.4h, v0.4h
+; CHECK-GI-NEXT: and v2.8b, v2.8b, v3.8b
+; CHECK-GI-NEXT: bsl v0.8b, v4.8b, v2.8b
+; CHECK-GI-NEXT: ret
+entry:
+  %c = call <4 x i16> @llvm.scmp(<4 x i16> %a, <4 x i16> %b)
+  ret <4 x i16> %c
+}
+
+define <8 x i16> @s_v8i16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SD-LABEL: s_v8i16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v2.8h, #1
+; CHECK-SD-NEXT: cmgt v3.8h, v0.8h, v1.8h
+; CHECK-SD-NEXT: cmgt v0.8h, v1.8h, v0.8h
+; CHECK-SD-NEXT: and v1.16b, v3.16b, v2.16b
+; CHECK-SD-NEXT: orr v0.16b, v1.16b, v0.16b
+;
CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: s_v8i16: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v2.8h, #1 +; CHECK-GI-NEXT: cmgt v3.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: movi v4.2d, #0xffffffffffffffff +; CHECK-GI-NEXT: cmgt v0.8h, v1.8h, v0.8h +; CHECK-GI-NEXT: and v2.16b, v2.16b, v3.16b +; CHECK-GI-NEXT: bsl v0.16b, v4.16b, v2.16b +; CHECK-GI-NEXT: ret +entry: + %c = call <8 x i16> @llvm.scmp(<8 x i16> %a, <8 x i16> %b) + ret <8 x i16> %c +} + +define <16 x i16> @s_v16i16(<16 x i16> %a, <16 x i16> %b) { +; CHECK-SD-LABEL: s_v16i16: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: movi v4.8h, #1 +; CHECK-SD-NEXT: cmgt v5.8h, v0.8h, v2.8h +; CHECK-SD-NEXT: cmgt v6.8h, v1.8h, v3.8h +; CHECK-SD-NEXT: cmgt v0.8h, v2.8h, v0.8h +; CHECK-SD-NEXT: cmgt v1.8h, v3.8h, v1.8h +; CHECK-SD-NEXT: and v2.16b, v5.16b, v4.16b +; CHECK-SD-NEXT: and v3.16b, v6.16b, v4.16b +; CHECK-SD-NEXT: orr v0.16b, v2.16b, v0.16b +; CHECK-SD-NEXT: orr v1.16b, v3.16b, v1.16b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: s_v16i16: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v4.8h, #1 +; CHECK-GI-NEXT: cmgt v5.8h, v0.8h, v2.8h +; CHECK-GI-NEXT: cmgt v6.8h, v1.8h, v3.8h +; CHECK-GI-NEXT: movi v7.2d, #0xffffffffffffffff +; CHECK-GI-NEXT: cmgt v0.8h, v2.8h, v0.8h +; CHECK-GI-NEXT: cmgt v1.8h, v3.8h, v1.8h +; CHECK-GI-NEXT: and v5.16b, v4.16b, v5.16b +; CHECK-GI-NEXT: and v4.16b, v4.16b, v6.16b +; CHECK-GI-NEXT: bsl v0.16b, v7.16b, v5.16b +; CHECK-GI-NEXT: bsl v1.16b, v7.16b, v4.16b +; CHECK-GI-NEXT: ret +entry: + %c = call <16 x i16> @llvm.scmp(<16 x i16> %a, <16 x i16> %b) + ret <16 x i16> %c +} + +define <2 x i32> @s_v2i32(<2 x i32> %a, <2 x i32> %b) { +; CHECK-SD-LABEL: s_v2i32: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: movi v2.2s, #1 +; CHECK-SD-NEXT: cmgt v3.2s, v0.2s, v1.2s +; CHECK-SD-NEXT: cmgt v0.2s, v1.2s, v0.2s +; CHECK-SD-NEXT: and v1.8b, v3.8b, v2.8b +; CHECK-SD-NEXT: orr v0.8b, v1.8b, v0.8b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: s_v2i32: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v2.2s, #1 +; CHECK-GI-NEXT: cmgt v3.2s, v0.2s, v1.2s +; CHECK-GI-NEXT: movi d4, #0xffffffffffffffff +; CHECK-GI-NEXT: cmgt v0.2s, v1.2s, v0.2s +; CHECK-GI-NEXT: and v2.8b, v2.8b, v3.8b +; CHECK-GI-NEXT: bsl v0.8b, v4.8b, v2.8b +; CHECK-GI-NEXT: ret +entry: + %c = call <2 x i32> @llvm.scmp(<2 x i32> %a, <2 x i32> %b) + ret <2 x i32> %c +} + +define <4 x i32> @s_v4i32(<4 x i32> %a, <4 x i32> %b) { +; CHECK-SD-LABEL: s_v4i32: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: movi v2.4s, #1 +; CHECK-SD-NEXT: cmgt v3.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: cmgt v0.4s, v1.4s, v0.4s +; CHECK-SD-NEXT: and v1.16b, v3.16b, v2.16b +; CHECK-SD-NEXT: orr v0.16b, v1.16b, v0.16b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: s_v4i32: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v2.4s, #1 +; CHECK-GI-NEXT: cmgt v3.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: movi v4.2d, #0xffffffffffffffff +; CHECK-GI-NEXT: cmgt v0.4s, v1.4s, v0.4s +; CHECK-GI-NEXT: and v2.16b, v2.16b, v3.16b +; CHECK-GI-NEXT: bsl v0.16b, v4.16b, v2.16b +; CHECK-GI-NEXT: ret +entry: + %c = call <4 x i32> @llvm.scmp(<4 x i32> %a, <4 x i32> %b) + ret <4 x i32> %c +} + +define <8 x i32> @s_v8i32(<8 x i32> %a, <8 x i32> %b) { +; CHECK-SD-LABEL: s_v8i32: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: movi v4.4s, #1 +; CHECK-SD-NEXT: cmgt v5.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: cmgt v6.4s, v1.4s, v3.4s +; CHECK-SD-NEXT: cmgt v0.4s, v2.4s, v0.4s +; CHECK-SD-NEXT: cmgt v1.4s, v3.4s, v1.4s +; CHECK-SD-NEXT: and v2.16b, v5.16b, v4.16b +; CHECK-SD-NEXT: and 
v3.16b, v6.16b, v4.16b +; CHECK-SD-NEXT: orr v0.16b, v2.16b, v0.16b +; CHECK-SD-NEXT: orr v1.16b, v3.16b, v1.16b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: s_v8i32: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v4.4s, #1 +; CHECK-GI-NEXT: cmgt v5.4s, v0.4s, v2.4s +; CHECK-GI-NEXT: cmgt v6.4s, v1.4s, v3.4s +; CHECK-GI-NEXT: movi v7.2d, #0xffffffffffffffff +; CHECK-GI-NEXT: cmgt v0.4s, v2.4s, v0.4s +; CHECK-GI-NEXT: cmgt v1.4s, v3.4s, v1.4s +; CHECK-GI-NEXT: and v5.16b, v4.16b, v5.16b +; CHECK-GI-NEXT: and v4.16b, v4.16b, v6.16b +; CHECK-GI-NEXT: bsl v0.16b, v7.16b, v5.16b +; CHECK-GI-NEXT: bsl v1.16b, v7.16b, v4.16b +; CHECK-GI-NEXT: ret +entry: + %c = call <8 x i32> @llvm.scmp(<8 x i32> %a, <8 x i32> %b) + ret <8 x i32> %c +} + +define <2 x i64> @s_v2i64(<2 x i64> %a, <2 x i64> %b) { +; CHECK-SD-LABEL: s_v2i64: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: mov w8, #1 // =0x1 +; CHECK-SD-NEXT: cmgt v2.2d, v0.2d, v1.2d +; CHECK-SD-NEXT: cmgt v0.2d, v1.2d, v0.2d +; CHECK-SD-NEXT: dup v3.2d, x8 +; CHECK-SD-NEXT: and v1.16b, v2.16b, v3.16b +; CHECK-SD-NEXT: orr v0.16b, v1.16b, v0.16b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: s_v2i64: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: adrp x8, .LCPI16_0 +; CHECK-GI-NEXT: cmgt v2.2d, v0.2d, v1.2d +; CHECK-GI-NEXT: movi v4.2d, #0xffffffffffffffff +; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI16_0] +; CHECK-GI-NEXT: cmgt v0.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: and v2.16b, v3.16b, v2.16b +; CHECK-GI-NEXT: bsl v0.16b, v4.16b, v2.16b +; CHECK-GI-NEXT: ret +entry: + %c = call <2 x i64> @llvm.scmp(<2 x i64> %a, <2 x i64> %b) + ret <2 x i64> %c +} + +define <4 x i64> @s_v4i64(<4 x i64> %a, <4 x i64> %b) { +; CHECK-SD-LABEL: s_v4i64: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: mov w8, #1 // =0x1 +; CHECK-SD-NEXT: cmgt v4.2d, v0.2d, v2.2d +; CHECK-SD-NEXT: cmgt v6.2d, v1.2d, v3.2d +; CHECK-SD-NEXT: dup v5.2d, x8 +; CHECK-SD-NEXT: cmgt v0.2d, v2.2d, v0.2d +; CHECK-SD-NEXT: cmgt v1.2d, v3.2d, v1.2d +; CHECK-SD-NEXT: and v2.16b, v4.16b, v5.16b +; CHECK-SD-NEXT: and v3.16b, v6.16b, v5.16b +; CHECK-SD-NEXT: orr v0.16b, v2.16b, v0.16b +; CHECK-SD-NEXT: orr v1.16b, v3.16b, v1.16b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: s_v4i64: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: adrp x8, .LCPI17_0 +; CHECK-GI-NEXT: cmgt v4.2d, v0.2d, v2.2d +; CHECK-GI-NEXT: cmgt v6.2d, v1.2d, v3.2d +; CHECK-GI-NEXT: ldr q5, [x8, :lo12:.LCPI17_0] +; CHECK-GI-NEXT: movi v7.2d, #0xffffffffffffffff +; CHECK-GI-NEXT: cmgt v0.2d, v2.2d, v0.2d +; CHECK-GI-NEXT: cmgt v1.2d, v3.2d, v1.2d +; CHECK-GI-NEXT: and v4.16b, v5.16b, v4.16b +; CHECK-GI-NEXT: and v5.16b, v5.16b, v6.16b +; CHECK-GI-NEXT: bsl v0.16b, v7.16b, v4.16b +; CHECK-GI-NEXT: bsl v1.16b, v7.16b, v5.16b +; CHECK-GI-NEXT: ret +entry: + %c = call <4 x i64> @llvm.scmp(<4 x i64> %a, <4 x i64> %b) + ret <4 x i64> %c +} + +define <16 x i8> @signOf_neon_scmp(<8 x i16> %s0_lo, <8 x i16> %s0_hi, <8 x i16> %s1_lo, <8 x i16> %s1_hi) { +; CHECK-SD-LABEL: signOf_neon_scmp: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmgt v5.8h, v0.8h, v2.8h +; CHECK-SD-NEXT: cmgt v2.8h, v2.8h, v0.8h +; CHECK-SD-NEXT: cmgt v4.8h, v1.8h, v3.8h +; CHECK-SD-NEXT: cmgt v1.8h, v3.8h, v1.8h +; CHECK-SD-NEXT: umov w8, v5.h[1] +; CHECK-SD-NEXT: umov w9, v2.h[1] +; CHECK-SD-NEXT: umov w10, v5.h[0] +; CHECK-SD-NEXT: umov w11, v2.h[0] +; CHECK-SD-NEXT: tst w8, #0xffff +; CHECK-SD-NEXT: cset w8, ne +; CHECK-SD-NEXT: tst w9, #0xffff +; CHECK-SD-NEXT: csinv w8, w8, wzr, eq +; CHECK-SD-NEXT: tst w10, #0xffff +; CHECK-SD-NEXT: umov w10, v5.h[2] 
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w11, #0xffff
+; CHECK-SD-NEXT: umov w11, v2.h[2]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: fmov s0, w9
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v2.h[3]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w11, #0xffff
+; CHECK-SD-NEXT: mov v0.b[1], w8
+; CHECK-SD-NEXT: umov w8, v5.h[3]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[2], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v5.h[4]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v2.h[4]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[3], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v5.h[5]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v2.h[5]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[4], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v5.h[6]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v2.h[6]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[5], w9
+; CHECK-SD-NEXT: umov w9, v5.h[7]
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: cset w8, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v2.h[7]
+; CHECK-SD-NEXT: csinv w8, w8, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[6], w8
+; CHECK-SD-NEXT: tst w9, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[0]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[0]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[7], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[1]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[1]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[8], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[2]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[2]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[9], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[3]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[3]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[10], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[4]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[4]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[11], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[5]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[5]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[12], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[6]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[6]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[13], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[7]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[7]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[14], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: cset w8, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: csinv w8, w8, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[15], w8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: signOf_neon_scmp:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmgt v4.8h, v0.8h, v2.8h
+; CHECK-GI-NEXT: cmgt v5.8h, v1.8h, v3.8h
+; CHECK-GI-NEXT: cmgt v0.8h, v2.8h, v0.8h
+; CHECK-GI-NEXT: cmgt v1.8h, v3.8h, v1.8h
+; CHECK-GI-NEXT: movi v2.16b, #1
+; CHECK-GI-NEXT: movi v3.2d, #0xffffffffffffffff
+; CHECK-GI-NEXT: uzp1 v4.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT: uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT: shl v1.16b, v4.16b, #7
+; CHECK-GI-NEXT: shl v0.16b, v0.16b, #7
+; CHECK-GI-NEXT: sshr v1.16b, v1.16b, #7
+; CHECK-GI-NEXT: sshr v0.16b, v0.16b, #7
+; CHECK-GI-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-GI-NEXT: bsl v0.16b, v3.16b, v1.16b
+; CHECK-GI-NEXT: ret
+entry:
+  %0 = shufflevector <8 x i16> %s0_lo, <8 x i16> %s0_hi, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %1 = shufflevector <8 x i16> %s1_lo, <8 x i16> %s1_hi, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %or.i = tail call <16 x i8> @llvm.scmp.v16i8.v16i16(<16 x i16> %0, <16 x i16> %1)
+  ret <16 x i8> %or.i
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-scmp.ll b/llvm/test/CodeGen/AArch64/sve-scmp.ll
new file mode 100644
index 0000000..2083ddd
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-scmp.ll
@@ -0,0 +1,160 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64 -mattr=+sve2 -verify-machineinstrs %s -o - | FileCheck %s
+
+define <vscale x 8 x i8> @s_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: s_nxv8i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: sxtb z0.h, p0/m, z0.h
+; CHECK-NEXT: sxtb z1.h, p0/m, z1.h
+; CHECK-NEXT: cmpgt p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: cmpgt p0.h, p0/z, z1.h, z0.h
+; CHECK-NEXT: mov z0.h, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z0.h, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 8 x i8> @llvm.scmp(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b)
+  ret <vscale x 8 x i8> %c
+}
+
+define <vscale x 16 x i8> @s_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: s_nxv16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.b
+; CHECK-NEXT: cmpgt p1.b, p0/z, z0.b, z1.b
+; CHECK-NEXT: cmpgt p0.b, p0/z, z1.b, z0.b
+; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z0.b, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 16 x i8> @llvm.scmp(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %c
+}
+
+define <vscale x 4 x i16> @s_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: s_nxv4i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: sxth z0.s, p0/m, z0.s
+; CHECK-NEXT: sxth z1.s, p0/m, z1.s
+; CHECK-NEXT: cmpgt p1.s, p0/z, z0.s, z1.s
+; CHECK-NEXT: cmpgt p0.s, p0/z, z1.s, z0.s
+; CHECK-NEXT: mov z0.s, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z0.s, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 4 x i16> @llvm.scmp(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b)
+  ret <vscale x 4 x i16> %c
+}
+
+define <vscale x 8 x i16> @s_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: s_nxv8i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: cmpgt p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: cmpgt p0.h, p0/z, z1.h, z0.h
+; CHECK-NEXT: mov z0.h, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z0.h, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 8 x i16> @llvm.scmp(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %c
+}
+
+define <vscale x 16 x i16> @s_nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; CHECK-LABEL: s_nxv16i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: cmpgt p1.h, p0/z, z0.h, z2.h
+; CHECK-NEXT: cmpgt p2.h, p0/z, z1.h, z3.h
+; CHECK-NEXT: cmpgt p3.h, p0/z, z2.h, z0.h
+; CHECK-NEXT: cmpgt p0.h, p0/z, z3.h, z1.h
+; CHECK-NEXT: mov z0.h, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z1.h, p2/z, #1 // =0x1
+; CHECK-NEXT: mov z0.h, p3/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov z1.h, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 16 x i16> @llvm.scmp(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b)
+  ret <vscale x 16 x i16> %c
+}
+
+define <vscale x 2 x i32> @s_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: s_nxv2i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: sxtw z0.d, p0/m, z0.d
+; CHECK-NEXT: sxtw z1.d, p0/m, z1.d
+; CHECK-NEXT: cmpgt p1.d, p0/z, z0.d, z1.d
+; CHECK-NEXT: cmpgt p0.d, p0/z, z1.d, z0.d
+; CHECK-NEXT: mov z0.d, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z0.d, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 2 x i32> @llvm.scmp(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b)
+  ret <vscale x 2 x i32> %c
+}
+
+define <vscale x 4 x i32> @s_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: s_nxv4i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: cmpgt p1.s, p0/z, z0.s, z1.s
+; CHECK-NEXT: cmpgt p0.s, p0/z, z1.s, z0.s
+; CHECK-NEXT: mov z0.s, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z0.s, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 4 x i32> @llvm.scmp(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %c
+}
+
+define <vscale x 8 x i32> @s_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) {
+; CHECK-LABEL: s_nxv8i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: cmpgt p1.s, p0/z, z0.s, z2.s
+; CHECK-NEXT: cmpgt p2.s, p0/z, z1.s, z3.s
+; CHECK-NEXT: cmpgt p3.s, p0/z, z2.s, z0.s
+; CHECK-NEXT: cmpgt p0.s, p0/z, z3.s, z1.s
+; CHECK-NEXT: mov z0.s, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z1.s, p2/z, #1 // =0x1
+; CHECK-NEXT: mov z0.s, p3/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov z1.s, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 8 x i32> @llvm.scmp(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b)
+  ret <vscale x 8 x i32> %c
+}
+
+define <vscale x 2 x i64> @s_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: s_nxv2i64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: cmpgt p1.d, p0/z, z0.d, z1.d
+; CHECK-NEXT: cmpgt p0.d, p0/z, z1.d, z0.d
+; CHECK-NEXT: mov z0.d, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z0.d, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 2 x i64> @llvm.scmp(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %c
+}
+
+define <vscale x 4 x i64> @s_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) {
+; CHECK-LABEL: s_nxv4i64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: cmpgt p1.d, p0/z, z0.d, z2.d
+; CHECK-NEXT: cmpgt p2.d, p0/z, z1.d, z3.d
+; CHECK-NEXT: cmpgt p3.d, p0/z, z2.d, z0.d
+; CHECK-NEXT: cmpgt p0.d, p0/z, z3.d, z1.d
+; CHECK-NEXT: mov z0.d, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z1.d, p2/z, #1 // =0x1
+; CHECK-NEXT: mov z0.d, p3/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov z1.d, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 4 x i64> @llvm.scmp(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b)
+  ret <vscale x 4 x i64> %c
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-ucmp.ll b/llvm/test/CodeGen/AArch64/sve-ucmp.ll
new file mode 100644
index 0000000..0ee3182
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-ucmp.ll
@@ -0,0 +1,160 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64 -mattr=+sve2 -verify-machineinstrs %s -o - | FileCheck %s
+
+define <vscale x 8 x i8> @u_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: u_nxv8i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: and z0.h, z0.h, #0xff
+; CHECK-NEXT: and z1.h, z1.h, #0xff
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: cmphi p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: cmphi p0.h, p0/z, z1.h, z0.h
+; CHECK-NEXT: mov z0.h, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z0.h, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 8 x i8> @llvm.ucmp(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b)
+  ret <vscale x 8 x i8> %c
+}
+
+define <vscale x 16 x i8> @u_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: u_nxv16i8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.b
+; CHECK-NEXT: cmphi p1.b, p0/z, z0.b, z1.b
+; CHECK-NEXT: cmphi p0.b, p0/z, z1.b, z0.b
+; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z0.b, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 16 x i8> @llvm.ucmp(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %c
+}
+
+define <vscale x 4 x i16> @u_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: u_nxv4i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: and z0.s, z0.s, #0xffff
+; CHECK-NEXT: and z1.s, z1.s, #0xffff
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: cmphi p1.s, p0/z, z0.s, z1.s
+; CHECK-NEXT: cmphi p0.s, p0/z, z1.s, z0.s
+; CHECK-NEXT: mov z0.s, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z0.s, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 4 x i16> @llvm.ucmp(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b)
+  ret <vscale x 4 x i16> %c
+}
+
+define <vscale x 8 x i16> @u_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: u_nxv8i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: cmphi p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: cmphi p0.h, p0/z, z1.h, z0.h
+; CHECK-NEXT: mov z0.h, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z0.h, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 8 x i16> @llvm.ucmp(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %c
+}
+
+define <vscale x 16 x i16> @u_nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; CHECK-LABEL: u_nxv16i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: cmphi p1.h, p0/z, z0.h, z2.h
+; CHECK-NEXT: cmphi p2.h, p0/z, z1.h, z3.h
+; CHECK-NEXT: cmphi p3.h, p0/z, z2.h, z0.h
+; CHECK-NEXT: cmphi p0.h, p0/z, z3.h, z1.h
+; CHECK-NEXT: mov z0.h, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z1.h, p2/z, #1 // =0x1
+; CHECK-NEXT: mov z0.h, p3/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov z1.h, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 16 x i16> @llvm.ucmp(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b)
+  ret <vscale x 16 x i16> %c
+}
+
+define <vscale x 2 x i32> @u_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: u_nxv2i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
+; CHECK-NEXT: and z1.d, z1.d, #0xffffffff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: cmphi p1.d, p0/z, z0.d, z1.d
+; CHECK-NEXT: cmphi p0.d, p0/z, z1.d, z0.d
+; CHECK-NEXT: mov z0.d, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z0.d, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 2 x i32> @llvm.ucmp(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b)
+  ret <vscale x 2 x i32> %c
+}
+
+define <vscale x 4 x i32> @u_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: u_nxv4i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: cmphi p1.s, p0/z, z0.s, z1.s
+; CHECK-NEXT: cmphi p0.s, p0/z, z1.s, z0.s
+; CHECK-NEXT: mov z0.s, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z0.s, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 4 x i32> @llvm.ucmp(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %c
+}
+
+define <vscale x 8 x i32> @u_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) {
+; CHECK-LABEL: u_nxv8i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: cmphi p1.s, p0/z, z0.s, z2.s
+; CHECK-NEXT: cmphi p2.s, p0/z, z1.s, z3.s
+; CHECK-NEXT: cmphi p3.s, p0/z, z2.s, z0.s
+; CHECK-NEXT: cmphi p0.s, p0/z, z3.s, z1.s
+; CHECK-NEXT: mov z0.s, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z1.s, p2/z, #1 // =0x1
+; CHECK-NEXT: mov z0.s, p3/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov z1.s, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 8 x i32> @llvm.ucmp(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b)
+  ret <vscale x 8 x i32> %c
+}
+
+define <vscale x 2 x i64> @u_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: u_nxv2i64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: cmphi p1.d, p0/z, z0.d, z1.d
+; CHECK-NEXT: cmphi p0.d, p0/z, z1.d, z0.d
+; CHECK-NEXT: mov z0.d, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z0.d, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 2 x i64> @llvm.ucmp(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %c
+}
+
+define <vscale x 4 x i64> @u_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) {
+; CHECK-LABEL: u_nxv4i64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: cmphi p1.d, p0/z, z0.d, z2.d
+; CHECK-NEXT: cmphi p2.d, p0/z, z1.d, z3.d
+; CHECK-NEXT: cmphi p3.d, p0/z, z2.d, z0.d
+; CHECK-NEXT: cmphi p0.d, p0/z, z3.d, z1.d
+; CHECK-NEXT: mov z0.d, p1/z, #1 // =0x1
+; CHECK-NEXT: mov z1.d, p2/z, #1 // =0x1
+; CHECK-NEXT: mov z0.d, p3/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov z1.d, p0/m, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: ret
+entry:
+  %c = call <vscale x 4 x i64> @llvm.ucmp(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b)
+  ret <vscale x 4 x i64> %c
+}
diff --git a/llvm/test/CodeGen/AArch64/ucmp.ll b/llvm/test/CodeGen/AArch64/ucmp.ll
index 1a7f0be..7e94cb6 100644
--- a/llvm/test/CodeGen/AArch64/ucmp.ll
+++ b/llvm/test/CodeGen/AArch64/ucmp.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 define i8 @ucmp.8.8(i8 %x, i8 %y) nounwind {
 ; CHECK-SD-LABEL: ucmp.8.8:
@@ -172,3 +172,416 @@ define <1 x i64> @ucmp.1.64.65(<1 x i65> %x, <1 x i65> %y) {
  %1 = call <1 x i64> @llvm.ucmp(<1 x i65> %x, <1 x i65> %y)
  ret <1 x i64> %1
 }
+
+define <8 x i8> @u_v8i8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-SD-LABEL: u_v8i8:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v2.8b, #1
+; CHECK-SD-NEXT: cmhi v3.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT: cmhi v0.8b, v1.8b, v0.8b
+; CHECK-SD-NEXT: and v1.8b, v3.8b, v2.8b
+; CHECK-SD-NEXT: orr v0.8b, v1.8b, v0.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: u_v8i8:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v2.8b, #1
+; CHECK-GI-NEXT: cmhi v3.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: movi d4, #0xffffffffffffffff
+; CHECK-GI-NEXT: cmhi v0.8b, v1.8b, v0.8b
+; CHECK-GI-NEXT: and v2.8b, v2.8b, v3.8b
+; CHECK-GI-NEXT: bsl v0.8b, v4.8b, v2.8b
+; CHECK-GI-NEXT: ret
+entry:
+  %c = call <8 x i8> @llvm.ucmp(<8 x i8> %a, <8 x i8> %b)
+  ret <8 x i8> %c
+}
+
+define <16 x i8> @u_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SD-LABEL: u_v16i8:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v2.16b, #1
+; CHECK-SD-NEXT: cmhi v3.16b, v0.16b, v1.16b
+; CHECK-SD-NEXT: cmhi v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: and v1.16b, v3.16b, v2.16b
+; CHECK-SD-NEXT: orr v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: u_v16i8:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v2.16b, #1
+; CHECK-GI-NEXT: cmhi v3.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT: movi v4.2d, #0xffffffffffffffff
+; CHECK-GI-NEXT: cmhi v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: and v2.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT: bsl v0.16b, v4.16b, v2.16b
+; CHECK-GI-NEXT: ret
+entry:
+  %c = call <16 x i8> @llvm.ucmp(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %c
+}
+
+define <4 x i16> @u_v4i16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-SD-LABEL: u_v4i16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v2.4h, #1
+; CHECK-SD-NEXT: cmhi v3.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: cmhi v0.4h, v1.4h, v0.4h
+; CHECK-SD-NEXT: and v1.8b, v3.8b, v2.8b
+; CHECK-SD-NEXT: orr v0.8b, v1.8b, v0.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: u_v4i16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v2.4h, #1
+; CHECK-GI-NEXT: cmhi v3.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: movi d4, #0xffffffffffffffff
+; CHECK-GI-NEXT: cmhi v0.4h, v1.4h, v0.4h
+; CHECK-GI-NEXT: and v2.8b, v2.8b, v3.8b
+; CHECK-GI-NEXT: bsl v0.8b, v4.8b, v2.8b
+; CHECK-GI-NEXT: ret
+entry:
+  %c = call <4 x i16> @llvm.ucmp(<4 x i16> %a, <4 x i16> %b)
+  ret <4 x i16> %c
+}
+
+define <8 x i16> @u_v8i16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SD-LABEL: u_v8i16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v2.8h, #1
+; CHECK-SD-NEXT: cmhi v3.8h, v0.8h, v1.8h
+; CHECK-SD-NEXT: cmhi v0.8h, v1.8h, v0.8h
+; CHECK-SD-NEXT: and v1.16b, v3.16b, v2.16b
+; CHECK-SD-NEXT: orr v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: u_v8i16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v2.8h, #1
+; CHECK-GI-NEXT: cmhi v3.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: movi v4.2d, #0xffffffffffffffff
+; CHECK-GI-NEXT: cmhi v0.8h, v1.8h, v0.8h
+; CHECK-GI-NEXT: and v2.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT: bsl v0.16b, v4.16b, v2.16b
+; CHECK-GI-NEXT: ret
+entry:
+  %c = call <8 x i16> @llvm.ucmp(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %c
+}
+
+define <16 x i16> @u_v16i16(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-SD-LABEL: u_v16i16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v4.8h, #1
+; CHECK-SD-NEXT: cmhi v5.8h, v0.8h, v2.8h
+; CHECK-SD-NEXT: cmhi v6.8h, v1.8h, v3.8h
+; CHECK-SD-NEXT: cmhi v0.8h, v2.8h, v0.8h
+; CHECK-SD-NEXT: cmhi v1.8h, v3.8h, v1.8h
+; CHECK-SD-NEXT: and v2.16b, v5.16b, v4.16b
+; CHECK-SD-NEXT: and v3.16b, v6.16b, v4.16b
+; CHECK-SD-NEXT: orr v0.16b, v2.16b, v0.16b
+; CHECK-SD-NEXT: orr v1.16b, v3.16b, v1.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: u_v16i16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v4.8h, #1
+; CHECK-GI-NEXT: cmhi v5.8h, v0.8h, v2.8h
+; CHECK-GI-NEXT: cmhi v6.8h, v1.8h, v3.8h
+; CHECK-GI-NEXT: movi v7.2d, #0xffffffffffffffff
+; CHECK-GI-NEXT: cmhi v0.8h, v2.8h, v0.8h
+; CHECK-GI-NEXT: cmhi v1.8h, v3.8h, v1.8h
+; CHECK-GI-NEXT: and v5.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT: and v4.16b, v4.16b, v6.16b
+; CHECK-GI-NEXT: bsl v0.16b, v7.16b, v5.16b
+; CHECK-GI-NEXT: bsl v1.16b, v7.16b, v4.16b
+; CHECK-GI-NEXT: ret
+entry:
+  %c = call <16 x i16> @llvm.ucmp(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %c
+}
+
+define <2 x i32> @u_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-SD-LABEL: u_v2i32:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v2.2s, #1
+; CHECK-SD-NEXT: cmhi v3.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: cmhi v0.2s, v1.2s, v0.2s
+; CHECK-SD-NEXT: and v1.8b, v3.8b, v2.8b
+; CHECK-SD-NEXT: orr v0.8b, v1.8b, v0.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: u_v2i32:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v2.2s, #1
+; CHECK-GI-NEXT: cmhi v3.2s, v0.2s, v1.2s
+; CHECK-GI-NEXT: movi d4, #0xffffffffffffffff
+; CHECK-GI-NEXT: cmhi v0.2s, v1.2s, v0.2s
+; CHECK-GI-NEXT: and v2.8b, v2.8b, v3.8b
+; CHECK-GI-NEXT: bsl v0.8b, v4.8b, v2.8b
+; CHECK-GI-NEXT: ret
+entry:
+  %c = call <2 x i32> @llvm.ucmp(<2 x i32> %a, <2 x i32> %b)
+  ret <2 x i32> %c
+}
+
+define <4 x i32> @u_v4i32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SD-LABEL: u_v4i32:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: cmhi v3.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: cmhi v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT: and v1.16b, v3.16b, v2.16b
+; CHECK-SD-NEXT: orr v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: u_v4i32:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v2.4s, #1
+; CHECK-GI-NEXT: cmhi v3.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: movi v4.2d, #0xffffffffffffffff
+; CHECK-GI-NEXT: cmhi v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT: and v2.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT: bsl v0.16b, v4.16b, v2.16b
+; CHECK-GI-NEXT: ret
+entry:
+  %c = call <4 x i32> @llvm.ucmp(<4 x i32> %a, <4 x i32> %b)
+  ret <4 x i32> %c
+}
+
+define <8 x i32> @u_v8i32(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-SD-LABEL: u_v8i32:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v4.4s, #1
+; CHECK-SD-NEXT: cmhi v5.4s, v0.4s, v2.4s
+; CHECK-SD-NEXT: cmhi v6.4s, v1.4s, v3.4s
+; CHECK-SD-NEXT: cmhi v0.4s, v2.4s, v0.4s
+; CHECK-SD-NEXT: cmhi v1.4s, v3.4s, v1.4s
+; CHECK-SD-NEXT: and v2.16b, v5.16b, v4.16b
+; CHECK-SD-NEXT: and v3.16b, v6.16b, v4.16b
+; CHECK-SD-NEXT: orr v0.16b, v2.16b, v0.16b
+; CHECK-SD-NEXT: orr v1.16b, v3.16b, v1.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: u_v8i32:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v4.4s, #1
+; CHECK-GI-NEXT: cmhi v5.4s, v0.4s, v2.4s
+; CHECK-GI-NEXT: cmhi v6.4s, v1.4s, v3.4s
+; CHECK-GI-NEXT: movi v7.2d, #0xffffffffffffffff
+; CHECK-GI-NEXT: cmhi v0.4s, v2.4s, v0.4s
+; CHECK-GI-NEXT: cmhi v1.4s, v3.4s, v1.4s
+; CHECK-GI-NEXT: and v5.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT: and v4.16b, v4.16b, v6.16b
+; CHECK-GI-NEXT: bsl v0.16b, v7.16b, v5.16b
+; CHECK-GI-NEXT: bsl v1.16b, v7.16b, v4.16b
+; CHECK-GI-NEXT: ret
+entry:
+  %c = call <8 x i32> @llvm.ucmp(<8 x i32> %a, <8 x i32> %b)
+  ret <8 x i32> %c
+}
+
+define <2 x i64> @u_v2i64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SD-LABEL: u_v2i64:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: cmhi v2.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT: cmhi v0.2d, v1.2d, v0.2d
+; CHECK-SD-NEXT: dup v3.2d, x8
+; CHECK-SD-NEXT: and v1.16b, v2.16b, v3.16b
+; CHECK-SD-NEXT: orr v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: u_v2i64:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, .LCPI17_0
+; CHECK-GI-NEXT: cmhi v2.2d, v0.2d, v1.2d
+; CHECK-GI-NEXT: movi v4.2d, #0xffffffffffffffff
+; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI17_0]
+; CHECK-GI-NEXT: cmhi v0.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT: and v2.16b, v3.16b, v2.16b
+; CHECK-GI-NEXT: bsl v0.16b, v4.16b, v2.16b
+; CHECK-GI-NEXT: ret
+entry:
+  %c = call <2 x i64> @llvm.ucmp(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %c
+}
+
+define <4 x i64> @u_v4i64(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-SD-LABEL: u_v4i64:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: cmhi v4.2d, v0.2d, v2.2d
+; CHECK-SD-NEXT: cmhi v6.2d, v1.2d, v3.2d
+; CHECK-SD-NEXT: dup v5.2d, x8
+; CHECK-SD-NEXT: cmhi v0.2d, v2.2d, v0.2d
+; CHECK-SD-NEXT: cmhi v1.2d, v3.2d, v1.2d
+; CHECK-SD-NEXT: and v2.16b, v4.16b, v5.16b
+; CHECK-SD-NEXT: and v3.16b, v6.16b, v5.16b
+; CHECK-SD-NEXT: orr v0.16b, v2.16b, v0.16b
+; CHECK-SD-NEXT: orr v1.16b, v3.16b, v1.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: u_v4i64:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, .LCPI18_0
+; CHECK-GI-NEXT: cmhi v4.2d, v0.2d, v2.2d
+; CHECK-GI-NEXT: cmhi v6.2d, v1.2d, v3.2d
+; CHECK-GI-NEXT: ldr q5, [x8, :lo12:.LCPI18_0]
+; CHECK-GI-NEXT: movi v7.2d, #0xffffffffffffffff
+; CHECK-GI-NEXT: cmhi v0.2d, v2.2d, v0.2d
+; CHECK-GI-NEXT: cmhi v1.2d, v3.2d, v1.2d
+; CHECK-GI-NEXT: and v4.16b, v5.16b, v4.16b
+; CHECK-GI-NEXT: and v5.16b, v5.16b, v6.16b
+; CHECK-GI-NEXT: bsl v0.16b, v7.16b, v4.16b
+; CHECK-GI-NEXT: bsl v1.16b, v7.16b, v5.16b
+; CHECK-GI-NEXT: ret
+entry:
+  %c = call <4 x i64> @llvm.ucmp(<4 x i64> %a, <4 x i64> %b)
+  ret <4 x i64> %c
+}
+
+define <16 x i8> @signOf_neon(<8 x i16> %s0_lo, <8 x i16> %s0_hi, <8 x i16> %s1_lo, <8 x i16> %s1_hi) {
+; CHECK-SD-LABEL: signOf_neon:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmhi v5.8h, v0.8h, v2.8h
+; CHECK-SD-NEXT: cmhi v2.8h, v2.8h, v0.8h
+; CHECK-SD-NEXT: cmhi v4.8h, v1.8h, v3.8h
+; CHECK-SD-NEXT: cmhi v1.8h, v3.8h, v1.8h
+; CHECK-SD-NEXT: umov w8, v5.h[1]
+; CHECK-SD-NEXT: umov w9, v2.h[1]
+; CHECK-SD-NEXT: umov w10, v5.h[0]
+; CHECK-SD-NEXT: umov w11, v2.h[0]
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: cset w8, ne
+; CHECK-SD-NEXT: tst w9, #0xffff
+; CHECK-SD-NEXT: csinv w8, w8, wzr, eq
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v5.h[2]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w11, #0xffff
+; CHECK-SD-NEXT: umov w11, v2.h[2]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: fmov s0, w9
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v2.h[3]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w11, #0xffff
+; CHECK-SD-NEXT: mov v0.b[1], w8
+; CHECK-SD-NEXT: umov w8, v5.h[3]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[2], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v5.h[4]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v2.h[4]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[3], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v5.h[5]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v2.h[5]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[4], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v5.h[6]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v2.h[6]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[5], w9
+; CHECK-SD-NEXT: umov w9, v5.h[7]
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: cset w8, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v2.h[7]
+; CHECK-SD-NEXT: csinv w8, w8, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[6], w8
+; CHECK-SD-NEXT: tst w9, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[0]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[0]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[7], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[1]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[1]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[8], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[2]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[2]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[9], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[3]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[3]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[10], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[4]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[4]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[11], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[5]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[5]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[12], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[6]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[6]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[13], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: umov w8, v4.h[7]
+; CHECK-SD-NEXT: cset w9, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: umov w10, v1.h[7]
+; CHECK-SD-NEXT: csinv w9, w9, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[14], w9
+; CHECK-SD-NEXT: tst w8, #0xffff
+; CHECK-SD-NEXT: cset w8, ne
+; CHECK-SD-NEXT: tst w10, #0xffff
+; CHECK-SD-NEXT: csinv w8, w8, wzr, eq
+; CHECK-SD-NEXT: mov v0.b[15], w8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: signOf_neon:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmhi v4.8h, v0.8h, v2.8h
+; CHECK-GI-NEXT: cmhi v5.8h, v1.8h, v3.8h
+; CHECK-GI-NEXT: cmhi v0.8h, v2.8h, v0.8h
+; CHECK-GI-NEXT: cmhi v1.8h, v3.8h, v1.8h
+; CHECK-GI-NEXT: movi v2.16b, #1
+; CHECK-GI-NEXT: movi v3.2d, #0xffffffffffffffff
+; CHECK-GI-NEXT: uzp1 v4.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT: uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT: shl v1.16b, v4.16b, #7
+; CHECK-GI-NEXT: shl v0.16b, v0.16b, #7
+; CHECK-GI-NEXT: sshr v1.16b, v1.16b, #7
+; CHECK-GI-NEXT: sshr v0.16b, v0.16b, #7
+; CHECK-GI-NEXT: and v1.16b, v2.16b, v1.16b
+; CHECK-GI-NEXT: bsl v0.16b, v3.16b, v1.16b
+; CHECK-GI-NEXT: ret
+entry:
+  %0 = shufflevector <8 x i16> %s0_lo, <8 x i16> %s0_hi, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %1 = shufflevector <8 x i16> %s1_lo, <8 x i16> %s1_hi, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %or.i = tail call <16 x i8> @llvm.ucmp.v16i8.v16i16(<16 x i16> %0, <16 x i16> %1)
+  ret <16 x i8> %or.i
+}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
index 1f1c265..fff06e4 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
@@ -268,7 +268,7 @@ define amdgpu_kernel void @v256i8_liveout(ptr addrspace(1) %src1, ptr addrspace(
 ; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
 ; GFX906-NEXT: s_waitcnt vmcnt(0)
 ; GFX906-NEXT: buffer_store_dword v5, off, s[12:15], 0 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: s_nop 0
 ; GFX906-NEXT: buffer_store_dword v6, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
 ; GFX906-NEXT: buffer_store_dword v7, off, s[12:15], 0 offset:8 ; 4-byte Folded Spill
 ; GFX906-NEXT: buffer_store_dword v8, off, s[12:15], 0 offset:12 ; 4-byte Folded Spill
@@ -294,7 +294,7 @@ define amdgpu_kernel void @v256i8_liveout(ptr addrspace(1) %src1, ptr addrspace(
 ; GFX906-NEXT: global_load_dwordx4 v[0:3], v4, s[6:7]
 ; GFX906-NEXT: s_waitcnt vmcnt(0)
 ; GFX906-NEXT: buffer_store_dword v0, off, s[12:15], 0 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: s_nop 0
 ; GFX906-NEXT: buffer_store_dword v1, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
 ; GFX906-NEXT: buffer_store_dword v2, off, s[12:15], 0 offset:8 ; 4-byte Folded Spill
 ; GFX906-NEXT: buffer_store_dword v3, off, s[12:15], 0 offset:12 ; 4-byte Folded Spill
@@ -317,7 +317,7 @@ define amdgpu_kernel void @v256i8_liveout(ptr addrspace(1) %src1, ptr addrspace(
 ; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
 ; GFX906-NEXT: s_waitcnt vmcnt(0)
 ; GFX906-NEXT: buffer_store_dword v0, off, s[12:15], 0 offset:16 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: s_nop 0
 ; GFX906-NEXT: buffer_store_dword v1, off, s[12:15], 0 offset:20 ; 4-byte Folded Spill
 ; GFX906-NEXT: buffer_store_dword v2, off, s[12:15], 0 offset:24 ; 4-byte Folded Spill
 ; GFX906-NEXT: buffer_store_dword v3, off, s[12:15], 0 offset:28 ; 4-byte Folded Spill
diff --git a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
index fb96b9f..49b450a 100644
--- a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
+++ b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
@@ -185,7 +185,7 @@ define void @no_free_vgprs_at_agpr_to_agpr_copy(float %v0, float %v1) #0 {
 ; GFX90A-NEXT: s_nop 7
 ; GFX90A-NEXT: s_nop 2
 ; GFX90A-NEXT: buffer_store_dword a0, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: s_nop 0
 ; GFX90A-NEXT: buffer_store_dword a1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
 ; GFX90A-NEXT: buffer_store_dword a2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
 ; GFX90A-NEXT: buffer_store_dword a3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
@@ -215,7 +215,6 @@ define void @no_free_vgprs_at_agpr_to_agpr_copy(float %v0, float %v1) #0 {
 ; GFX90A-NEXT: buffer_load_dword a7, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
 ; GFX90A-NEXT: buffer_load_dword a8, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
 ; GFX90A-NEXT: buffer_load_dword a9, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
-; GFX90A-NEXT: s_waitcnt vmcnt(9)
 ; GFX90A-NEXT: v_accvgpr_write_b32 a10, v39 ; Reload Reuse
 ; GFX90A-NEXT: v_accvgpr_write_b32 a11, v38 ; Reload Reuse
 ; GFX90A-NEXT: v_accvgpr_write_b32 a12, v37 ; Reload Reuse
@@ -1093,7 +1092,7 @@ define void @no_free_vgprs_at_sgpr_to_agpr_copy(float %v0, float %v1) #0 {
 ; GFX90A-NEXT: s_nop 7
 ; GFX90A-NEXT: s_nop 2
 ; GFX90A-NEXT: buffer_store_dword a0, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: s_nop 0
 ; GFX90A-NEXT: buffer_store_dword a1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
 ; GFX90A-NEXT: buffer_store_dword a2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
 ; GFX90A-NEXT: buffer_store_dword a3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
@@ -1124,7 +1123,6 @@ define void @no_free_vgprs_at_sgpr_to_agpr_copy(float %v0, float %v1) #0 {
 ; GFX90A-NEXT: buffer_load_dword a8, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
 ; GFX90A-NEXT: buffer_load_dword a9, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
 ; GFX90A-NEXT: buffer_load_dword a10, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
-; GFX90A-NEXT: s_waitcnt vmcnt(10)
 ; GFX90A-NEXT: v_accvgpr_write_b32 a11, v39 ; Reload Reuse
 ; GFX90A-NEXT: v_accvgpr_write_b32 a12, v38 ; Reload Reuse
 ; GFX90A-NEXT: v_accvgpr_write_b32 a13, v37 ; Reload Reuse
diff --git a/llvm/test/CodeGen/AMDGPU/collapse-endcf.ll b/llvm/test/CodeGen/AMDGPU/collapse-endcf.ll
index 75f5eda..51f9cf73 100644
--- a/llvm/test/CodeGen/AMDGPU/collapse-endcf.ll
+++ b/llvm/test/CodeGen/AMDGPU/collapse-endcf.ll
@@ -708,7 +708,6 @@ define amdgpu_kernel void @nested_if_else_if(ptr addrspace(1) nocapture %arg) {
 ; GCN-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
 ; GCN-O0-NEXT: v_mov_b32_e32 v6, v2
 ; GCN-O0-NEXT: buffer_store_dword v5, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
-; GCN-O0-NEXT: s_waitcnt vmcnt(0)
 ; GCN-O0-NEXT: buffer_store_dword v6, off, s[12:15], 0 offset:8 ; 4-byte Folded Spill
 ; GCN-O0-NEXT: s_mov_b32 s1, 0xf000
 ; GCN-O0-NEXT: s_mov_b32 s2, 0
@@ -722,6 +721,7 @@ define amdgpu_kernel void @nested_if_else_if(ptr addrspace(1) nocapture %arg) {
 ; GCN-O0-NEXT: s_mov_b64 s[2:3], exec
 ; GCN-O0-NEXT: s_and_b64 s[0:1], s[2:3], s[0:1]
 ; GCN-O0-NEXT: s_xor_b64 s[2:3], s[0:1], s[2:3]
+; GCN-O0-NEXT: s_waitcnt vmcnt(4)
 ; GCN-O0-NEXT: v_writelane_b32 v0, s2, 0
 ; GCN-O0-NEXT: v_writelane_b32 v0, s3, 1
 ; GCN-O0-NEXT: s_or_saveexec_b64 s[8:9], -1
@@ -1159,7 +1159,6 @@ define void @scc_liveness(i32 %arg) local_unnamed_addr #0 {
 ; GCN-O0-NEXT: v_mov_b32_e32 v3, s10
 ; GCN-O0-NEXT: v_mov_b32_e32 v4, s11
 ; GCN-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GCN-O0-NEXT: s_waitcnt vmcnt(0)
 ; GCN-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
 ; GCN-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
 ; GCN-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -1193,7 +1192,6 @@ define void @scc_liveness(i32 %arg) local_unnamed_addr #0 {
 ; GCN-O0-NEXT: v_mov_b32_e32 v3, s10
 ; GCN-O0-NEXT: v_mov_b32_e32 v4, s11
 ; GCN-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GCN-O0-NEXT: s_waitcnt vmcnt(0)
 ; GCN-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
 ; GCN-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
 ; GCN-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
@@ -1225,7 +1223,6 @@ define void @scc_liveness(i32 %arg) local_unnamed_addr #0 {
 ; GCN-O0-NEXT: v_mov_b32_e32 v2, s6
 ; GCN-O0-NEXT: v_mov_b32_e32 v3, s7
 ; GCN-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GCN-O0-NEXT: s_waitcnt vmcnt(0)
 ; GCN-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
 ; GCN-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
 ; GCN-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
@@ -1247,7 +1244,6 @@ define void @scc_liveness(i32 %arg) local_unnamed_addr #0 {
 ; GCN-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
 ; GCN-O0-NEXT: s_waitcnt vmcnt(0)
 ; GCN-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; GCN-O0-NEXT: s_waitcnt vmcnt(0)
 ; GCN-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
 ; GCN-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
 ; GCN-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -1269,7 +1265,6 @@ define void @scc_liveness(i32 %arg) local_unnamed_addr #0 {
 ; GCN-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
 ; GCN-O0-NEXT: s_waitcnt vmcnt(0)
 ; GCN-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GCN-O0-NEXT: s_waitcnt vmcnt(0)
 ; GCN-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
 ; GCN-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
 ; GCN-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -1343,7 +1338,6 @@ define void @scc_liveness(i32 %arg) local_unnamed_addr #0 {
 ; GCN-O0-NEXT: s_mov_b64 exec, s[14:15]
 ; GCN-O0-NEXT: s_waitcnt vmcnt(1)
 ; GCN-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; GCN-O0-NEXT: s_waitcnt vmcnt(0)
 ; GCN-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
 ; GCN-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
 ; GCN-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll
index fea1303..712cecf 100644
--- a/llvm/test/CodeGen/AMDGPU/div_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll
@@ -345,30 +345,30 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: s_mov_b32 s4, 63
 ; GFX9-O0-NEXT: v_ashrrev_i64 v[13:14], s4, v[13:14]
 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: v_mov_b32_e32 v14, v12
 ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11
 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v18
 ; GFX9-O0-NEXT: v_mov_b32_e32 v14, v19
 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: v_mov_b32_e32 v14, v6
 ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v5
 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v16
 ; GFX9-O0-NEXT: v_mov_b32_e32 v14, v17
 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: v_mov_b32_e32 v14, v12
 ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v19
@@ -483,14 +483,14 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
 ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v9
 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT: ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
 ; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
 ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], s[6:7]
 ; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f
@@ -538,10 +538,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v5
 ; GFX9-O0-NEXT: s_and_b64 s[6:7], s[4:5], s[6:7]
 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: s_mov_b64 s[4:5], exec
 ; GFX9-O0-NEXT: v_writelane_b32 v0, s4, 4
@@ -572,16 +572,19 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT: s_waitcnt vmcnt(6)
 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(6)
 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(6)
 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(6)
 ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: s_branch .LBB0_5
 ; GFX9-O0-NEXT: .LBB0_3: ; %Flow2
@@ -598,10 +601,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: s_branch .LBB0_9
 ; GFX9-O0-NEXT: .LBB0_4: ; %udiv-loop-exit
@@ -638,10 +641,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
 ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v4
 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: s_branch .LBB0_3
 ; GFX9-O0-NEXT: .LBB0_5: ; %Flow1
@@ -662,16 +665,16 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: s_branch .LBB0_4
 ; GFX9-O0-NEXT: .LBB0_6: ; %udiv-do-while
@@ -756,11 +759,12 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v23
 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_waitcnt vmcnt(1)
 ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11
 ; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14
 ; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15
 ; GFX9-O0-NEXT: v_sub_co_u32_e32 v13, vcc, v13, v6
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-O0-NEXT: v_subb_co_u32_e32 v12, vcc, v12, v10, vcc
 ; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v11, v4, vcc
 ; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v5, vcc
@@ -846,22 +850,22 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v3
 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v2
 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v1
 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v0
 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15
 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v14
 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v13
 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12
 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
 ; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 6
@@ -873,28 +877,28 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: s_mov_b64 exec, s[22:23]
 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX9-O0-NEXT: s_cbranch_execnz .LBB0_6
@@ -919,7 +923,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(9)
+; GFX9-O0-NEXT: s_waitcnt vmcnt(10)
 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
 ; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], v4, v[21:22]
@@ -992,10 +996,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v17
 ; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[6:7]
 ; GFX9-O0-NEXT: v_mov_b32_e32 v15, s9
@@ -1008,28 +1012,28 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: s_mov_b64 exec, s[22:23]
 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: s_branch .LBB0_6
 ; GFX9-O0-NEXT: .LBB0_8: ; %udiv-bb1
@@ -1046,7 +1050,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1
 ; GFX9-O0-NEXT: s_mov_b32 s5, s6
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_waitcnt vmcnt(1)
 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
 ; GFX9-O0-NEXT: s_mov_b32 s4, s7
 ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
@@ -1057,6 +1061,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
 ; GFX9-O0-NEXT: v_add_co_u32_e32 v9, vcc, v4, v5
 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v5, vcc
 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
 ; GFX9-O0-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
@@ -1073,12 +1078,12 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
 ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: s_mov_b32 s4, 0x7f
 ; GFX9-O0-NEXT: v_sub_u32_e64 v3, s4, v4
@@ -1126,10 +1131,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
 ; GFX9-O0-NEXT: v_mov_b32_e32 v8, v3
 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
 ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v10
@@ -1146,16 +1151,16 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
 ; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: s_mov_b64 s[6:7], exec
 ; GFX9-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
@@ -1455,7 +1460,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v2
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v3
 ; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_nop 0
 ; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
@@ -1682,7 +1687,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], -1
 ; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_nop 0
 ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
@@ -1715,12 +1720,13 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
 ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(4)
 ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_nop 0
 ; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(4)
 ; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_nop 0
 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
@@ -1739,7 +1745,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
 ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_nop 0
 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
@@ -1753,9 +1759,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
 ; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
 ; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(2)
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v6
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v7
 ; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
@@ -1794,7 +1801,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
 ; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_nop 0
 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
@@ -1817,12 +1824,12 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
 ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_nop 0
 ; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_nop 0
 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
@@ -1860,9 +1867,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
 ; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
 ; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(16)
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(18)
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v2
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v3
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(16)
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v4
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5
 ; GFX9-G-O0-NEXT: s_mov_b32 s8, 1
@@ -1912,9 +1920,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s8
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v0
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(8)
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(10)
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v29, v31
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v30, v32
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(8)
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v25, v33
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v26, v34
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v29
@@ -2001,7 +2010,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v1
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v0
 ; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_nop 0
 ; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
@@ -2010,7 +2019,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v13
 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v12
 ; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_nop 0
 ; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
@@ -2024,22 +2033,22 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
 ; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_nop 0
 ; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_nop 0
 ; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
 ; GFX9-G-O0-NEXT: buffer_store_dword v10,
off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill @@ -2195,7 +2204,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v8 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v7 ; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill @@ -2246,7 +2255,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v9 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v10 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill @@ -2260,7 +2269,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s4 ; GFX9-G-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[5:6], v[7:8] ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill @@ -2269,7 +2278,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s10 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s11 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill @@ -2296,9 +2305,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword 
v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(2) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v9 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v10 +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v11 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v12 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v1 @@ -2553,22 +2563,23 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_waitcnt vmcnt(3) ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2 ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v7, v12 ; GFX9-O0-NEXT: v_mov_b32_e32 v8, v2 @@ -2579,6 +2590,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3 ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0 +; GFX9-O0-NEXT: s_waitcnt vmcnt(8) ; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 0 ; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 1 ; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[1:2], s[6:7] @@ -2688,14 +2700,14 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v9 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7 ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], s[6:7] ; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f @@ -2743,10 +2755,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v5 ; GFX9-O0-NEXT: s_and_b64 s[6:7], s[4:5], s[6:7] ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; 
GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 s[4:5], exec ; GFX9-O0-NEXT: v_writelane_b32 v0, s4, 2 @@ -2777,16 +2789,19 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_waitcnt vmcnt(6) ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_waitcnt vmcnt(6) ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_waitcnt vmcnt(6) ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_waitcnt vmcnt(6) ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB1_5 ; GFX9-O0-NEXT: .LBB1_3: ; %Flow2 @@ -2803,10 +2818,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB1_9 ; GFX9-O0-NEXT: .LBB1_4: ; %udiv-loop-exit @@ -2843,10 +2858,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v4 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB1_3 ; GFX9-O0-NEXT: .LBB1_5: ; %Flow1 @@ -2867,16 +2882,16 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt 
vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB1_4 ; GFX9-O0-NEXT: .LBB1_6: ; %udiv-do-while @@ -2961,11 +2976,12 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v23 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24 -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_waitcnt vmcnt(1) ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11 ; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14 ; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15 ; GFX9-O0-NEXT: v_sub_co_u32_e32 v13, vcc, v13, v6 +; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: v_subb_co_u32_e32 v12, vcc, v12, v10, vcc ; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v11, v4, vcc ; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v5, vcc @@ -3051,22 +3067,22 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v3 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v2 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v1 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v0 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v14 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v13 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5] ; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 4 @@ -3078,28 +3094,28 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; 
GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX9-O0-NEXT: s_cbranch_execnz .LBB1_6 @@ -3124,7 +3140,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload ; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload ; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; GFX9-O0-NEXT: s_waitcnt vmcnt(9) +; GFX9-O0-NEXT: s_waitcnt vmcnt(10) ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10 ; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], v4, v[21:22] @@ -3197,10 +3213,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v17 ; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7] ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[6:7] ; GFX9-O0-NEXT: v_mov_b32_e32 v15, s9 @@ -3213,28 +3229,28 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 
4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB1_6 ; GFX9-O0-NEXT: .LBB1_8: ; %udiv-bb1 @@ -3251,7 +3267,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1 ; GFX9-O0-NEXT: s_mov_b32 s5, s6 -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_waitcnt vmcnt(1) ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1 ; GFX9-O0-NEXT: s_mov_b32 s4, s7 ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0 @@ -3262,6 +3278,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5 ; GFX9-O0-NEXT: v_add_co_u32_e32 v9, vcc, v4, v5 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, s4 +; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v5, vcc ; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8 ; GFX9-O0-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc @@ -3278,12 +3295,12 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9 ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b32 s4, 0x7f ; GFX9-O0-NEXT: v_sub_u32_e64 v3, s4, v4 @@ -3331,10 +3348,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v8, v3 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; GFX9-O0-NEXT: 
buffer_store_dword v5, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2 ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v10 @@ -3351,16 +3368,16 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6 ; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 s[6:7], exec ; GFX9-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5] @@ -3601,7 +3618,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v2 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v3 ; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill @@ -3622,7 +3639,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v9 ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill @@ -3798,7 +3815,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], -1 ; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7] ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill @@ -3831,12 +3848,13 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(4) ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt 
vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(4) ; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill @@ -3855,7 +3873,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill @@ -3869,9 +3887,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(2) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5 +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v6 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v7 ; GFX9-G-O0-NEXT: s_mov_b32 s4, 1 @@ -3910,7 +3929,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5 ; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill @@ -3933,12 +3952,12 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, 
s[0:3], s32 offset:176 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill @@ -3976,9 +3995,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0 -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(16) +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(18) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v2 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v3 +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(16) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v4 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5 ; GFX9-G-O0-NEXT: s_mov_b32 s8, 1 @@ -4028,9 +4048,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s8 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v0 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1 -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(8) +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(10) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v29, v31 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v30, v32 +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(8) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v33 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v34 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v29 @@ -4125,7 +4146,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v1 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v0 ; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill @@ -4134,7 +4155,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v13 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v12 ; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill @@ -4148,22 +4169,22 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 
offset:200 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill @@ -4192,9 +4213,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b32 s4, 64 -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(2) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v5 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v4 +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v7 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v6 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s4 @@ -4270,22 +4292,22 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, s5 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s4 ; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill @@ -4323,7 +4345,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v8 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v7 ; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: 
s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill @@ -4374,7 +4396,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v9 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v10 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill @@ -4388,7 +4410,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s4 ; GFX9-G-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[5:6], v[7:8] ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill @@ -4397,7 +4419,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s10 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s11 ; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_nop 0 ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill @@ -4420,10 +4442,13 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(3) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v5 +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(2) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v6 +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(1) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7 +; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v8 ; GFX9-G-O0-NEXT: ; kill: killed $vgpr4 ; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1 diff --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll index a33142f..b4fe112 100644 --- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll +++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll @@ -1581,7 +1581,6 @@ define amdgpu_kernel void @extract_neg_offset_vgpr(ptr addrspace(1) %out) { ; NOOPT-NEXT: v_mov_b32_e32 v15, v18 ; NOOPT-NEXT: v_mov_b32_e32 v16, v17 ; NOOPT-NEXT: buffer_store_dword v1, off, s[20:23], 0 offset:4 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v2, off, s[20:23], 0 offset:8 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[20:23], 0 
offset:12 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[20:23], 0 offset:16 ; 4-byte Folded Spill @@ -4089,7 +4088,6 @@ define amdgpu_kernel void @insert_neg_offset_vgpr(ptr addrspace(1) %in, ptr addr ; NOOPT-NEXT: v_mov_b32_e32 v14, v18 ; NOOPT-NEXT: v_mov_b32_e32 v15, v17 ; NOOPT-NEXT: buffer_store_dword v0, off, s[20:23], 0 offset:72 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[20:23], 0 offset:76 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v2, off, s[20:23], 0 offset:80 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[20:23], 0 offset:84 ; 4-byte Folded Spill @@ -4115,7 +4113,6 @@ define amdgpu_kernel void @insert_neg_offset_vgpr(ptr addrspace(1) %in, ptr addr ; NOOPT-NEXT: s_mov_b64 exec, s[16:17] ; NOOPT-NEXT: s_waitcnt expcnt(2) ; NOOPT-NEXT: buffer_store_dword v0, off, s[20:23], 0 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[20:23], 0 offset:4 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v2, off, s[20:23], 0 offset:8 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[20:23], 0 offset:12 ; 4-byte Folded Spill @@ -4166,7 +4163,6 @@ define amdgpu_kernel void @insert_neg_offset_vgpr(ptr addrspace(1) %in, ptr addr ; NOOPT-NEXT: s_add_i32 m0, s2, 0xfffffe00 ; NOOPT-NEXT: v_movreld_b32_e32 v1, v17 ; NOOPT-NEXT: buffer_store_dword v1, off, s[20:23], 0 offset:140 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v2, off, s[20:23], 0 offset:144 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[20:23], 0 offset:148 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[20:23], 0 offset:152 ; 4-byte Folded Spill @@ -4184,7 +4180,6 @@ define amdgpu_kernel void @insert_neg_offset_vgpr(ptr addrspace(1) %in, ptr addr ; NOOPT-NEXT: buffer_store_dword v16, off, s[20:23], 0 offset:200 ; 4-byte Folded Spill ; NOOPT-NEXT: s_waitcnt expcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[20:23], 0 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v2, off, s[20:23], 0 offset:4 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[20:23], 0 offset:8 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[20:23], 0 offset:12 ; 4-byte Folded Spill @@ -4242,19 +4237,22 @@ define amdgpu_kernel void @insert_neg_offset_vgpr(ptr addrspace(1) %in, ptr addr ; NOOPT-NEXT: buffer_load_dword v29, off, s[20:23], 0 offset:192 ; 4-byte Folded Reload ; NOOPT-NEXT: buffer_load_dword v30, off, s[20:23], 0 offset:196 ; 4-byte Folded Reload ; NOOPT-NEXT: buffer_load_dword v31, off, s[20:23], 0 offset:200 ; 4-byte Folded Reload -; NOOPT-NEXT: s_waitcnt vmcnt(0) +; NOOPT-NEXT: s_waitcnt vmcnt(12) ; NOOPT-NEXT: v_mov_b32_e32 v5, v19 ; NOOPT-NEXT: v_mov_b32_e32 v6, v18 ; NOOPT-NEXT: v_mov_b32_e32 v7, v17 ; NOOPT-NEXT: v_mov_b32_e32 v1, v16 +; NOOPT-NEXT: s_waitcnt vmcnt(8) ; NOOPT-NEXT: v_mov_b32_e32 v2, v23 ; NOOPT-NEXT: v_mov_b32_e32 v3, v22 ; NOOPT-NEXT: v_mov_b32_e32 v4, v21 ; NOOPT-NEXT: v_mov_b32_e32 v8, v20 +; NOOPT-NEXT: s_waitcnt vmcnt(4) ; NOOPT-NEXT: v_mov_b32_e32 v13, v27 ; NOOPT-NEXT: v_mov_b32_e32 v14, v26 ; NOOPT-NEXT: v_mov_b32_e32 v15, v25 ; NOOPT-NEXT: v_mov_b32_e32 v9, v24 +; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: v_mov_b32_e32 v10, v31 ; NOOPT-NEXT: v_mov_b32_e32 v11, v30 ; NOOPT-NEXT: v_mov_b32_e32 v12, v29 @@ -4576,7 +4574,6 @@ define amdgpu_kernel void @insert_neg_inline_offset_vgpr(ptr 
addrspace(1) %in, p ; NOOPT-NEXT: v_mov_b32_e32 v14, v18 ; NOOPT-NEXT: v_mov_b32_e32 v15, v17 ; NOOPT-NEXT: buffer_store_dword v0, off, s[20:23], 0 offset:72 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[20:23], 0 offset:76 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v2, off, s[20:23], 0 offset:80 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[20:23], 0 offset:84 ; 4-byte Folded Spill @@ -4602,7 +4599,6 @@ define amdgpu_kernel void @insert_neg_inline_offset_vgpr(ptr addrspace(1) %in, p ; NOOPT-NEXT: s_mov_b64 exec, s[16:17] ; NOOPT-NEXT: s_waitcnt expcnt(2) ; NOOPT-NEXT: buffer_store_dword v0, off, s[20:23], 0 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[20:23], 0 offset:4 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v2, off, s[20:23], 0 offset:8 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[20:23], 0 offset:12 ; 4-byte Folded Spill @@ -4653,7 +4649,6 @@ define amdgpu_kernel void @insert_neg_inline_offset_vgpr(ptr addrspace(1) %in, p ; NOOPT-NEXT: s_add_i32 m0, s2, -16 ; NOOPT-NEXT: v_movreld_b32_e32 v1, v17 ; NOOPT-NEXT: buffer_store_dword v1, off, s[20:23], 0 offset:140 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v2, off, s[20:23], 0 offset:144 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[20:23], 0 offset:148 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[20:23], 0 offset:152 ; 4-byte Folded Spill @@ -4671,7 +4666,6 @@ define amdgpu_kernel void @insert_neg_inline_offset_vgpr(ptr addrspace(1) %in, p ; NOOPT-NEXT: buffer_store_dword v16, off, s[20:23], 0 offset:200 ; 4-byte Folded Spill ; NOOPT-NEXT: s_waitcnt expcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[20:23], 0 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v2, off, s[20:23], 0 offset:4 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[20:23], 0 offset:8 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[20:23], 0 offset:12 ; 4-byte Folded Spill @@ -4729,19 +4723,22 @@ define amdgpu_kernel void @insert_neg_inline_offset_vgpr(ptr addrspace(1) %in, p ; NOOPT-NEXT: buffer_load_dword v29, off, s[20:23], 0 offset:192 ; 4-byte Folded Reload ; NOOPT-NEXT: buffer_load_dword v30, off, s[20:23], 0 offset:196 ; 4-byte Folded Reload ; NOOPT-NEXT: buffer_load_dword v31, off, s[20:23], 0 offset:200 ; 4-byte Folded Reload -; NOOPT-NEXT: s_waitcnt vmcnt(0) +; NOOPT-NEXT: s_waitcnt vmcnt(12) ; NOOPT-NEXT: v_mov_b32_e32 v5, v19 ; NOOPT-NEXT: v_mov_b32_e32 v6, v18 ; NOOPT-NEXT: v_mov_b32_e32 v7, v17 ; NOOPT-NEXT: v_mov_b32_e32 v1, v16 +; NOOPT-NEXT: s_waitcnt vmcnt(8) ; NOOPT-NEXT: v_mov_b32_e32 v2, v23 ; NOOPT-NEXT: v_mov_b32_e32 v3, v22 ; NOOPT-NEXT: v_mov_b32_e32 v4, v21 ; NOOPT-NEXT: v_mov_b32_e32 v8, v20 +; NOOPT-NEXT: s_waitcnt vmcnt(4) ; NOOPT-NEXT: v_mov_b32_e32 v13, v27 ; NOOPT-NEXT: v_mov_b32_e32 v14, v26 ; NOOPT-NEXT: v_mov_b32_e32 v15, v25 ; NOOPT-NEXT: v_mov_b32_e32 v9, v24 +; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: v_mov_b32_e32 v10, v31 ; NOOPT-NEXT: v_mov_b32_e32 v11, v30 ; NOOPT-NEXT: v_mov_b32_e32 v12, v29 @@ -5154,7 +5151,6 @@ define amdgpu_kernel void @extract_vgpr_offset_multiple_in_block(ptr addrspace(1 ; NOOPT-NEXT: v_mov_b32_e32 v2, s1 ; NOOPT-NEXT: v_mov_b32_e32 v1, s0 ; NOOPT-NEXT: buffer_store_dword v1, off, s[36:39], 0 offset:4 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; 
NOOPT-NEXT: buffer_store_dword v2, off, s[36:39], 0 offset:8 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[36:39], 0 offset:12 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[36:39], 0 offset:16 ; 4-byte Folded Spill @@ -5272,7 +5268,6 @@ define amdgpu_kernel void @extract_vgpr_offset_multiple_in_block(ptr addrspace(1 ; NOOPT-NEXT: v_mov_b32_e32 v2, s1 ; NOOPT-NEXT: v_mov_b32_e32 v1, s0 ; NOOPT-NEXT: buffer_store_dword v1, off, s[36:39], 0 offset:88 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v2, off, s[36:39], 0 offset:92 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[36:39], 0 offset:96 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[36:39], 0 offset:100 ; 4-byte Folded Spill @@ -5893,7 +5888,6 @@ define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(ptr addrspace(1) ; NOOPT-NEXT: buffer_store_dword v16, off, s[28:31], 0 offset:64 ; 4-byte Folded Spill ; NOOPT-NEXT: s_mov_b64 exec, s[26:27] ; NOOPT-NEXT: buffer_store_dword v0, off, s[28:31], 0 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[28:31], 0 offset:4 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v2, off, s[28:31], 0 offset:8 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[28:31], 0 offset:12 ; 4-byte Folded Spill @@ -5944,7 +5938,6 @@ define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(ptr addrspace(1) ; NOOPT-NEXT: s_mov_b32 m0, s2 ; NOOPT-NEXT: v_movreld_b32_e32 v1, v17 ; NOOPT-NEXT: buffer_store_dword v1, off, s[28:31], 0 offset:88 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v2, off, s[28:31], 0 offset:92 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[28:31], 0 offset:96 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[28:31], 0 offset:100 ; 4-byte Folded Spill @@ -5962,7 +5955,6 @@ define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(ptr addrspace(1) ; NOOPT-NEXT: buffer_store_dword v16, off, s[28:31], 0 offset:148 ; 4-byte Folded Spill ; NOOPT-NEXT: s_waitcnt expcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[28:31], 0 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v2, off, s[28:31], 0 offset:4 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[28:31], 0 offset:8 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[28:31], 0 offset:12 ; 4-byte Folded Spill @@ -6025,7 +6017,6 @@ define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(ptr addrspace(1) ; NOOPT-NEXT: buffer_store_dword v16, off, s[28:31], 0 offset:64 ; 4-byte Folded Spill ; NOOPT-NEXT: s_mov_b64 exec, s[26:27] ; NOOPT-NEXT: buffer_store_dword v0, off, s[28:31], 0 offset:152 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[28:31], 0 offset:156 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v2, off, s[28:31], 0 offset:160 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[28:31], 0 offset:164 ; 4-byte Folded Spill @@ -6076,7 +6067,6 @@ define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(ptr addrspace(1) ; NOOPT-NEXT: s_mov_b32 m0, s2 ; NOOPT-NEXT: v_movreld_b32_e32 v1, v17 ; NOOPT-NEXT: buffer_store_dword v1, off, s[28:31], 0 offset:220 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v2, off, s[28:31], 0 offset:224 ; 4-byte Folded Spill ; NOOPT-NEXT: 
buffer_store_dword v3, off, s[28:31], 0 offset:228 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[28:31], 0 offset:232 ; 4-byte Folded Spill @@ -6094,7 +6084,6 @@ define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(ptr addrspace(1) ; NOOPT-NEXT: buffer_store_dword v16, off, s[28:31], 0 offset:280 ; 4-byte Folded Spill ; NOOPT-NEXT: s_waitcnt expcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[28:31], 0 offset:152 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v2, off, s[28:31], 0 offset:156 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[28:31], 0 offset:160 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[28:31], 0 offset:164 ; 4-byte Folded Spill @@ -6154,19 +6143,22 @@ define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(ptr addrspace(1) ; NOOPT-NEXT: buffer_load_dword v30, off, s[28:31], 0 offset:272 ; 4-byte Folded Reload ; NOOPT-NEXT: buffer_load_dword v31, off, s[28:31], 0 offset:276 ; 4-byte Folded Reload ; NOOPT-NEXT: buffer_load_dword v32, off, s[28:31], 0 offset:280 ; 4-byte Folded Reload -; NOOPT-NEXT: s_waitcnt vmcnt(0) +; NOOPT-NEXT: s_waitcnt vmcnt(12) ; NOOPT-NEXT: v_mov_b32_e32 v6, v20 ; NOOPT-NEXT: v_mov_b32_e32 v7, v19 ; NOOPT-NEXT: v_mov_b32_e32 v8, v18 ; NOOPT-NEXT: v_mov_b32_e32 v2, v17 +; NOOPT-NEXT: s_waitcnt vmcnt(8) ; NOOPT-NEXT: v_mov_b32_e32 v3, v24 ; NOOPT-NEXT: v_mov_b32_e32 v4, v23 ; NOOPT-NEXT: v_mov_b32_e32 v5, v22 ; NOOPT-NEXT: v_mov_b32_e32 v9, v21 +; NOOPT-NEXT: s_waitcnt vmcnt(4) ; NOOPT-NEXT: v_mov_b32_e32 v14, v28 ; NOOPT-NEXT: v_mov_b32_e32 v15, v27 ; NOOPT-NEXT: v_mov_b32_e32 v16, v26 ; NOOPT-NEXT: v_mov_b32_e32 v10, v25 +; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: v_mov_b32_e32 v11, v32 ; NOOPT-NEXT: v_mov_b32_e32 v12, v31 ; NOOPT-NEXT: v_mov_b32_e32 v13, v30 @@ -9158,7 +9150,6 @@ define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) { ; NOOPT-NEXT: buffer_store_dword v16, off, s[24:27], 0 offset:4 ; 4-byte Folded Spill ; NOOPT-NEXT: s_mov_b64 exec, s[20:21] ; NOOPT-NEXT: buffer_store_dword v0, off, s[24:27], 0 offset:12 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[24:27], 0 offset:16 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v2, off, s[24:27], 0 offset:20 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[24:27], 0 offset:24 ; 4-byte Folded Spill @@ -9210,7 +9201,6 @@ define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) { ; NOOPT-NEXT: s_mov_b32 m0, s2 ; NOOPT-NEXT: v_movreld_b32_e32 v1, v17 ; NOOPT-NEXT: buffer_store_dword v1, off, s[24:27], 0 offset:84 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v2, off, s[24:27], 0 offset:88 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[24:27], 0 offset:92 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[24:27], 0 offset:96 ; 4-byte Folded Spill @@ -9228,7 +9218,6 @@ define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) { ; NOOPT-NEXT: buffer_store_dword v16, off, s[24:27], 0 offset:144 ; 4-byte Folded Spill ; NOOPT-NEXT: s_waitcnt expcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[24:27], 0 offset:12 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v2, off, s[24:27], 0 offset:16 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[24:27], 0 offset:20 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[24:27], 0 offset:24 ; 4-byte 
Folded Spill @@ -9282,7 +9271,7 @@ define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) { ; NOOPT-NEXT: buffer_load_dword v15, off, s[24:27], 0 offset:140 ; 4-byte Folded Reload ; NOOPT-NEXT: buffer_load_dword v16, off, s[24:27], 0 offset:144 ; 4-byte Folded Reload ; NOOPT-NEXT: s_mov_b64 s[0:1], 0 -; NOOPT-NEXT: s_waitcnt vmcnt(0) +; NOOPT-NEXT: s_waitcnt vmcnt(14) ; NOOPT-NEXT: buffer_store_dword v1, off, s[24:27], 0 offset:8 ; 4-byte Folded Spill ; NOOPT-NEXT: v_writelane_b32 v0, s0, 2 ; NOOPT-NEXT: v_writelane_b32 v0, s1, 3 @@ -9576,7 +9565,6 @@ define amdgpu_cs void @insert_or_disj_index(ptr addrspace(1) %out, ptr addrspace ; NOOPT-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec ; NOOPT-NEXT: v_mov_b32_e32 v1, v2 ; NOOPT-NEXT: buffer_store_dword v0, off, s[16:19], 0 offset:136 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[16:19], 0 offset:140 ; 4-byte Folded Spill ; NOOPT-NEXT: s_mov_b32 s8, 0xf000 ; NOOPT-NEXT: s_mov_b32 s0, 0 @@ -9632,7 +9620,6 @@ define amdgpu_cs void @insert_or_disj_index(ptr addrspace(1) %out, ptr addrspace ; NOOPT-NEXT: v_mov_b32_e32 v14, v18 ; NOOPT-NEXT: v_mov_b32_e32 v15, v17 ; NOOPT-NEXT: buffer_store_dword v0, off, s[16:19], 0 offset:68 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[16:19], 0 offset:72 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v2, off, s[16:19], 0 offset:76 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[16:19], 0 offset:80 ; 4-byte Folded Spill @@ -9656,7 +9643,6 @@ define amdgpu_cs void @insert_or_disj_index(ptr addrspace(1) %out, ptr addrspace ; NOOPT-NEXT: s_mov_b64 exec, s[12:13] ; NOOPT-NEXT: s_waitcnt expcnt(1) ; NOOPT-NEXT: buffer_store_dword v0, off, s[16:19], 0 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[16:19], 0 offset:4 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v2, off, s[16:19], 0 offset:8 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[16:19], 0 offset:12 ; 4-byte Folded Spill @@ -9707,7 +9693,6 @@ define amdgpu_cs void @insert_or_disj_index(ptr addrspace(1) %out, ptr addrspace ; NOOPT-NEXT: s_mov_b32 m0, s2 ; NOOPT-NEXT: v_movreld_b32_e32 v2, v17 ; NOOPT-NEXT: buffer_store_dword v1, off, s[16:19], 0 offset:148 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v2, off, s[16:19], 0 offset:152 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[16:19], 0 offset:156 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[16:19], 0 offset:160 ; 4-byte Folded Spill @@ -9725,7 +9710,6 @@ define amdgpu_cs void @insert_or_disj_index(ptr addrspace(1) %out, ptr addrspace ; NOOPT-NEXT: buffer_store_dword v16, off, s[16:19], 0 offset:208 ; 4-byte Folded Spill ; NOOPT-NEXT: s_waitcnt expcnt(0) ; NOOPT-NEXT: buffer_store_dword v1, off, s[16:19], 0 ; 4-byte Folded Spill -; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: buffer_store_dword v2, off, s[16:19], 0 offset:4 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v3, off, s[16:19], 0 offset:8 ; 4-byte Folded Spill ; NOOPT-NEXT: buffer_store_dword v4, off, s[16:19], 0 offset:12 ; 4-byte Folded Spill @@ -9785,19 +9769,22 @@ define amdgpu_cs void @insert_or_disj_index(ptr addrspace(1) %out, ptr addrspace ; NOOPT-NEXT: buffer_load_dword v31, off, s[16:19], 0 offset:200 ; 4-byte Folded Reload ; NOOPT-NEXT: buffer_load_dword v32, off, s[16:19], 0 offset:204 ; 4-byte Folded 
Reload ; NOOPT-NEXT: buffer_load_dword v33, off, s[16:19], 0 offset:208 ; 4-byte Folded Reload -; NOOPT-NEXT: s_waitcnt vmcnt(0) +; NOOPT-NEXT: s_waitcnt vmcnt(12) ; NOOPT-NEXT: v_mov_b32_e32 v7, v21 ; NOOPT-NEXT: v_mov_b32_e32 v8, v20 ; NOOPT-NEXT: v_mov_b32_e32 v9, v19 ; NOOPT-NEXT: v_mov_b32_e32 v1, v18 +; NOOPT-NEXT: s_waitcnt vmcnt(8) ; NOOPT-NEXT: v_mov_b32_e32 v2, v25 ; NOOPT-NEXT: v_mov_b32_e32 v3, v24 ; NOOPT-NEXT: v_mov_b32_e32 v4, v23 ; NOOPT-NEXT: v_mov_b32_e32 v10, v22 +; NOOPT-NEXT: s_waitcnt vmcnt(4) ; NOOPT-NEXT: v_mov_b32_e32 v15, v29 ; NOOPT-NEXT: v_mov_b32_e32 v16, v28 ; NOOPT-NEXT: v_mov_b32_e32 v17, v27 ; NOOPT-NEXT: v_mov_b32_e32 v11, v26 +; NOOPT-NEXT: s_waitcnt vmcnt(0) ; NOOPT-NEXT: v_mov_b32_e32 v12, v33 ; NOOPT-NEXT: v_mov_b32_e32 v13, v32 ; NOOPT-NEXT: v_mov_b32_e32 v14, v31 diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll index 4cc47b0..fe54270 100644 --- a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll +++ b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll @@ -3479,7 +3479,6 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v17, 0xffff, v15 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v15, 0xffff, v14 ; GCN-NOHSA-SI-NEXT: buffer_store_dword v15, off, s[12:15], 0 ; 4-byte Folded Spill -; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-SI-NEXT: buffer_store_dword v16, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill ; GCN-NOHSA-SI-NEXT: buffer_store_dword v17, off, s[12:15], 0 offset:8 ; 4-byte Folded Spill ; GCN-NOHSA-SI-NEXT: buffer_store_dword v18, off, s[12:15], 0 offset:12 ; 4-byte Folded Spill @@ -3487,7 +3486,6 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v18, 0xffff, v12 ; GCN-NOHSA-SI-NEXT: buffer_store_dword v18, off, s[12:15], 0 offset:16 ; 4-byte Folded Spill -; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-SI-NEXT: buffer_store_dword v19, off, s[12:15], 0 offset:20 ; 4-byte Folded Spill ; GCN-NOHSA-SI-NEXT: buffer_store_dword v20, off, s[12:15], 0 offset:24 ; 4-byte Folded Spill ; GCN-NOHSA-SI-NEXT: buffer_store_dword v21, off, s[12:15], 0 offset:28 ; 4-byte Folded Spill @@ -3498,6 +3496,7 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v23, 0xffff, v10 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v18, 0xffff, v9 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v16, 0xffff, v8 +; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(13) ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v15, 16, v7 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v13, 16, v6 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v46, 16, v5 @@ -3506,6 +3505,7 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v12, 0xffff, v6 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v45, 0xffff, v5 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v43, 0xffff, v4 +; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(12) ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v11, 16, v3 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v9, 16, v2 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v50, 16, v1 @@ -3514,6 +3514,7 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v8, 0xffff, v2 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v49, 0xffff, v1 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v47, 0xffff, v0 +; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(11) ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v54, 16, v30 ; GCN-NOHSA-SI-NEXT: 
v_lshrrev_b32_e32 v52, 16, v29 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v58, 16, v28 @@ -3522,6 +3523,7 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v51, 0xffff, v29 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v57, 0xffff, v28 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v55, 0xffff, v27 +; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(10) ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v30, 16, v34 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v28, 16, v33 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v62, 16, v32 @@ -3530,6 +3532,7 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v27, 0xffff, v33 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v61, 0xffff, v32 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v59, 0xffff, v31 +; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(9) ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v34, 16, v38 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v32, 16, v37 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v7, 16, v36 @@ -3538,6 +3541,7 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v31, 0xffff, v37 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v6, 0xffff, v36 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v4, 0xffff, v35 +; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(8) ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v38, 16, v42 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v36, 16, v41 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v3, 16, v40 @@ -3563,7 +3567,6 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:32 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:48 ; GCN-NOHSA-SI-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:16 ; 4-byte Folded Reload -; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-SI-NEXT: buffer_load_dword v1, off, s[12:15], 0 offset:20 ; 4-byte Folded Reload ; GCN-NOHSA-SI-NEXT: buffer_load_dword v2, off, s[12:15], 0 offset:24 ; 4-byte Folded Reload ; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) @@ -3572,7 +3575,6 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) ; GCN-NOHSA-SI-NEXT: buffer_load_dword v0, off, s[12:15], 0 ; 4-byte Folded Reload -; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-SI-NEXT: buffer_load_dword v1, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload ; GCN-NOHSA-SI-NEXT: buffer_load_dword v2, off, s[12:15], 0 offset:8 ; 4-byte Folded Reload ; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) @@ -3806,7 +3808,6 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-VI-NEXT: v_and_b32_e32 v17, 0xffff, v15 ; GCN-NOHSA-VI-NEXT: v_and_b32_e32 v15, 0xffff, v14 ; GCN-NOHSA-VI-NEXT: buffer_store_dword v15, off, s[88:91], 0 ; 4-byte Folded Spill -; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-VI-NEXT: buffer_store_dword v16, off, s[88:91], 0 offset:4 ; 4-byte Folded Spill ; GCN-NOHSA-VI-NEXT: buffer_store_dword v17, off, s[88:91], 0 offset:8 ; 4-byte Folded Spill ; GCN-NOHSA-VI-NEXT: buffer_store_dword v18, off, s[88:91], 0 offset:12 ; 4-byte Folded Spill @@ -3815,10 +3816,10 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-VI-NEXT: v_and_b32_e32 v20, 0xffff, v13 ; GCN-NOHSA-VI-NEXT: v_and_b32_e32 v18, 0xffff, v12 ; GCN-NOHSA-VI-NEXT: buffer_store_dword v18, off, s[88:91], 0 offset:16 ; 4-byte Folded Spill -; 
GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-VI-NEXT: buffer_store_dword v19, off, s[88:91], 0 offset:20 ; 4-byte Folded Spill ; GCN-NOHSA-VI-NEXT: buffer_store_dword v20, off, s[88:91], 0 offset:24 ; 4-byte Folded Spill ; GCN-NOHSA-VI-NEXT: buffer_store_dword v21, off, s[88:91], 0 offset:28 ; 4-byte Folded Spill +; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(14) ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v26, 16, v11 ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v24, 16, v10 ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v19, 16, v9 @@ -3827,6 +3828,7 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-VI-NEXT: v_and_b32_e32 v23, 0xffff, v10 ; GCN-NOHSA-VI-NEXT: v_and_b32_e32 v18, 0xffff, v9 ; GCN-NOHSA-VI-NEXT: v_and_b32_e32 v16, 0xffff, v8 +; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(12) ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v11, 16, v3 ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v9, 16, v2 ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v50, 16, v1 @@ -3835,6 +3837,7 @@ define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-VI-NEXT: v_and_b32_e32 v8, 0xffff, v2 ; GCN-NOHSA-VI-NEXT: v_and_b32_e32 v49, 0xffff, v1 ; GCN-NOHSA-VI-NEXT: v_and_b32_e32 v47, 0xffff, v0 +; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(8) ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v3, 16, v40 ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v1, 16, v39 ; GCN-NOHSA-VI-NEXT: v_and_b32_e32 v2, 0xffff, v40 @@ -4294,7 +4297,6 @@ define amdgpu_kernel void @global_sextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v2, v11, 0, 16 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v0, v10, 0, 16 ; GCN-NOHSA-SI-NEXT: buffer_store_dword v0, off, s[12:15], 0 ; 4-byte Folded Spill -; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-SI-NEXT: buffer_store_dword v1, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill ; GCN-NOHSA-SI-NEXT: buffer_store_dword v2, off, s[12:15], 0 offset:8 ; 4-byte Folded Spill ; GCN-NOHSA-SI-NEXT: buffer_store_dword v3, off, s[12:15], 0 offset:12 ; 4-byte Folded Spill @@ -4302,6 +4304,7 @@ define amdgpu_kernel void @global_sextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v5, 16, v8 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v6, v9, 0, 16 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v4, v8, 0, 16 +; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(6) ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v11, 16, v35 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v9, 16, v34 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v10, v35, 0, 16 @@ -4310,6 +4313,7 @@ define amdgpu_kernel void @global_sextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v13, 16, v32 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v14, v33, 0, 16 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v12, v32, 0, 16 +; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(5) ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v35, 16, v39 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v33, 16, v38 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v34, v39, 0, 16 @@ -4318,6 +4322,7 @@ define amdgpu_kernel void @global_sextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v45, 16, v36 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v46, v37, 0, 16 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v44, v36, 0, 16 +; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(4) ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v39, 16, v43 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v37, 16, v42 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v38, v43, 0, 16 @@ -4375,7 +4380,6 @@ define amdgpu_kernel void @global_sextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:48 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 
v[4:7], off, s[0:3], 0 ; GCN-NOHSA-SI-NEXT: buffer_load_dword v0, off, s[12:15], 0 ; 4-byte Folded Reload -; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-SI-NEXT: buffer_load_dword v1, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload ; GCN-NOHSA-SI-NEXT: buffer_load_dword v2, off, s[12:15], 0 offset:8 ; 4-byte Folded Reload ; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) @@ -4609,7 +4613,6 @@ define amdgpu_kernel void @global_sextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v17, v15, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v15, v14, 0, 16 ; GCN-NOHSA-VI-NEXT: buffer_store_dword v15, off, s[88:91], 0 ; 4-byte Folded Spill -; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-VI-NEXT: buffer_store_dword v16, off, s[88:91], 0 offset:4 ; 4-byte Folded Spill ; GCN-NOHSA-VI-NEXT: buffer_store_dword v17, off, s[88:91], 0 offset:8 ; 4-byte Folded Spill ; GCN-NOHSA-VI-NEXT: buffer_store_dword v18, off, s[88:91], 0 offset:12 ; 4-byte Folded Spill @@ -4618,10 +4621,10 @@ define amdgpu_kernel void @global_sextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v15, v13, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v13, v12, 0, 16 ; GCN-NOHSA-VI-NEXT: buffer_store_dword v13, off, s[88:91], 0 offset:16 ; 4-byte Folded Spill -; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-VI-NEXT: buffer_store_dword v14, off, s[88:91], 0 offset:20 ; 4-byte Folded Spill ; GCN-NOHSA-VI-NEXT: buffer_store_dword v15, off, s[88:91], 0 offset:24 ; 4-byte Folded Spill ; GCN-NOHSA-VI-NEXT: buffer_store_dword v16, off, s[88:91], 0 offset:28 ; 4-byte Folded Spill +; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(14) ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v19, 16, v11 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v17, 16, v10 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v18, v11, 0, 16 @@ -4630,6 +4633,7 @@ define amdgpu_kernel void @global_sextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v40, 16, v8 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v41, v9, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v39, v8, 0, 16 +; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(12) ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v11, 16, v3 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v9, 16, v2 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v10, v3, 0, 16 @@ -4638,6 +4642,7 @@ define amdgpu_kernel void @global_sextload_v64i16_to_v64i32(ptr addrspace(1) %ou ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v48, 16, v0 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v49, v1, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v47, v0, 0, 16 +; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(8) ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v3, 16, v36 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v1, 16, v35 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v2, v36, 0, 16 @@ -7277,7 +7282,6 @@ define amdgpu_kernel void @global_zextload_v32i16_to_v32i64(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v2, 16, v14 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v0, 0xffff, v14 ; GCN-NOHSA-SI-NEXT: buffer_store_dword v0, off, s[12:15], 0 ; 4-byte Folded Spill -; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-SI-NEXT: buffer_store_dword v1, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill ; GCN-NOHSA-SI-NEXT: buffer_store_dword v2, off, s[12:15], 0 offset:8 ; 4-byte Folded Spill ; GCN-NOHSA-SI-NEXT: buffer_store_dword v3, off, s[12:15], 0 offset:12 ; 4-byte Folded Spill @@ -7293,6 +7297,7 @@ define amdgpu_kernel void @global_zextload_v32i16_to_v32i64(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v16, 0xffff, v19 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v44, 16, v21 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v42, 0xffff, v21 +; GCN-NOHSA-SI-NEXT: s_waitcnt 
vmcnt(5) ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v48, 16, v22 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v46, 0xffff, v22 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v22, 16, v24 @@ -7301,6 +7306,7 @@ define amdgpu_kernel void @global_zextload_v32i16_to_v32i64(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v50, 0xffff, v23 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v56, 16, v25 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v54, 0xffff, v25 +; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(4) ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v40, 16, v29 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v60, 16, v26 ; GCN-NOHSA-SI-NEXT: v_and_b32_e32 v58, 0xffff, v26 @@ -7341,19 +7347,17 @@ define amdgpu_kernel void @global_zextload_v32i16_to_v32i64(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, v39 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, v39 ; GCN-NOHSA-SI-NEXT: buffer_store_dword v12, off, s[12:15], 0 offset:16 ; 4-byte Folded Spill -; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-SI-NEXT: buffer_store_dword v13, off, s[12:15], 0 offset:20 ; 4-byte Folded Spill ; GCN-NOHSA-SI-NEXT: buffer_store_dword v14, off, s[12:15], 0 offset:24 ; 4-byte Folded Spill ; GCN-NOHSA-SI-NEXT: buffer_store_dword v15, off, s[12:15], 0 offset:28 ; 4-byte Folded Spill ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) ; GCN-NOHSA-SI-NEXT: buffer_load_dword v12, off, s[12:15], 0 ; 4-byte Folded Reload -; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-SI-NEXT: buffer_load_dword v13, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload ; GCN-NOHSA-SI-NEXT: buffer_load_dword v14, off, s[12:15], 0 offset:8 ; 4-byte Folded Reload ; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-SI-NEXT: buffer_load_dword v15, off, s[12:15], 0 offset:12 ; 4-byte Folded Reload -; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, v39 +; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, v39 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s0, s4 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s1, s5 @@ -7372,7 +7376,6 @@ define amdgpu_kernel void @global_zextload_v32i16_to_v32i64(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:64 ; GCN-NOHSA-SI-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:16 ; 4-byte Folded Reload -; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-SI-NEXT: buffer_load_dword v1, off, s[12:15], 0 offset:20 ; 4-byte Folded Reload ; GCN-NOHSA-SI-NEXT: buffer_load_dword v2, off, s[12:15], 0 offset:24 ; 4-byte Folded Reload ; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i32.ll b/llvm/test/CodeGen/AMDGPU/load-global-i32.ll index 7cdf270..5ae2b91 100644 --- a/llvm/test/CodeGen/AMDGPU/load-global-i32.ll +++ b/llvm/test/CodeGen/AMDGPU/load-global-i32.ll @@ -3032,10 +3032,10 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou ; SI-NOHSA-NEXT: v_mov_b32_e32 v44, v30 ; SI-NOHSA-NEXT: v_mov_b32_e32 v46, v31 ; SI-NOHSA-NEXT: buffer_store_dword v44, off, s[12:15], 0 ; 4-byte Folded Spill -; SI-NOHSA-NEXT: s_waitcnt vmcnt(0) ; SI-NOHSA-NEXT: buffer_store_dword v45, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill ; SI-NOHSA-NEXT: buffer_store_dword v46, off, s[12:15], 0 offset:8 ; 4-byte Folded Spill ; SI-NOHSA-NEXT: buffer_store_dword v47, off, s[12:15], 0 offset:12 ; 4-byte Folded Spill +; SI-NOHSA-NEXT: s_waitcnt vmcnt(9) ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v15, 31, v7 ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v13, 31, v6 ; SI-NOHSA-NEXT: s_waitcnt expcnt(0) @@ 
-3045,6 +3045,7 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou ; SI-NOHSA-NEXT: v_mov_b32_e32 v46, v5 ; SI-NOHSA-NEXT: v_mov_b32_e32 v12, v6 ; SI-NOHSA-NEXT: v_mov_b32_e32 v14, v7 +; SI-NOHSA-NEXT: s_waitcnt vmcnt(8) ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v7, 31, v3 ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v5, 31, v2 ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v51, 31, v1 @@ -3053,6 +3054,7 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou ; SI-NOHSA-NEXT: v_mov_b32_e32 v50, v1 ; SI-NOHSA-NEXT: v_mov_b32_e32 v4, v2 ; SI-NOHSA-NEXT: v_mov_b32_e32 v6, v3 +; SI-NOHSA-NEXT: s_waitcnt vmcnt(7) ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v3, 31, v19 ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v1, 31, v18 ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v55, 31, v17 @@ -3061,6 +3063,7 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou ; SI-NOHSA-NEXT: v_mov_b32_e32 v54, v17 ; SI-NOHSA-NEXT: v_mov_b32_e32 v0, v18 ; SI-NOHSA-NEXT: v_mov_b32_e32 v2, v19 +; SI-NOHSA-NEXT: s_waitcnt vmcnt(6) ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v19, 31, v23 ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v17, 31, v22 ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v59, 31, v21 @@ -3069,6 +3072,7 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou ; SI-NOHSA-NEXT: v_mov_b32_e32 v58, v21 ; SI-NOHSA-NEXT: v_mov_b32_e32 v16, v22 ; SI-NOHSA-NEXT: v_mov_b32_e32 v18, v23 +; SI-NOHSA-NEXT: s_waitcnt vmcnt(5) ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v23, 31, v27 ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v21, 31, v26 ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v63, 31, v25 @@ -3077,6 +3081,7 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou ; SI-NOHSA-NEXT: v_mov_b32_e32 v62, v25 ; SI-NOHSA-NEXT: v_mov_b32_e32 v20, v26 ; SI-NOHSA-NEXT: v_mov_b32_e32 v22, v27 +; SI-NOHSA-NEXT: s_waitcnt vmcnt(4) ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v27, 31, v11 ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v25, 31, v10 ; SI-NOHSA-NEXT: v_ashrrev_i32_e32 v31, 31, v9 @@ -3091,7 +3096,6 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou ; SI-NOHSA-NEXT: buffer_store_dwordx4 v[36:39], off, s[0:3], 0 offset:240 ; SI-NOHSA-NEXT: buffer_store_dwordx4 v[32:35], off, s[0:3], 0 offset:192 ; SI-NOHSA-NEXT: buffer_load_dword v8, off, s[12:15], 0 ; 4-byte Folded Reload -; SI-NOHSA-NEXT: s_waitcnt vmcnt(0) ; SI-NOHSA-NEXT: buffer_load_dword v9, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload ; SI-NOHSA-NEXT: buffer_load_dword v10, off, s[12:15], 0 offset:8 ; 4-byte Folded Reload ; SI-NOHSA-NEXT: s_waitcnt vmcnt(0) @@ -3614,10 +3618,11 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou ; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v4, v0 ; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v6, v1 ; GCN-GFX900-HSA-NEXT: buffer_store_dword v25, off, s[16:19], 0 ; 4-byte Folded Spill -; GCN-GFX900-HSA-NEXT: s_waitcnt vmcnt(0) +; GCN-GFX900-HSA-NEXT: s_nop 0 ; GCN-GFX900-HSA-NEXT: buffer_store_dword v26, off, s[16:19], 0 offset:4 ; 4-byte Folded Spill ; GCN-GFX900-HSA-NEXT: buffer_store_dword v27, off, s[16:19], 0 offset:8 ; 4-byte Folded Spill ; GCN-GFX900-HSA-NEXT: buffer_store_dword v28, off, s[16:19], 0 offset:12 ; 4-byte Folded Spill +; GCN-GFX900-HSA-NEXT: s_waitcnt vmcnt(7) ; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v28, 31, v12 ; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v26, 31, v11 ; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v40, 31, v10 @@ -3626,6 +3631,7 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou ; GCN-GFX900-HSA-NEXT: 
v_mov_b32_e32 v39, v10 ; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v25, v11 ; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v27, v12 +; GCN-GFX900-HSA-NEXT: s_waitcnt vmcnt(6) ; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v12, 31, v16 ; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v10, 31, v15 ; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v44, 31, v14 @@ -3634,6 +3640,7 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou ; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v43, v14 ; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v9, v15 ; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v11, v16 +; GCN-GFX900-HSA-NEXT: s_waitcnt vmcnt(5) ; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v16, 31, v20 ; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v14, 31, v19 ; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v48, 31, v18 @@ -3643,6 +3650,7 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou ; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v13, v19 ; GCN-GFX900-HSA-NEXT: global_load_dwordx4 v[49:52], v8, s[2:3] offset:16 ; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v15, v20 +; GCN-GFX900-HSA-NEXT: s_waitcnt vmcnt(5) ; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v20, 31, v24 ; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v18, 31, v23 ; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v56, 31, v22 diff --git a/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands-non-ptr-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands-non-ptr-intrinsics.ll index 06ebd86..85d342b 100644 --- a/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands-non-ptr-intrinsics.ll +++ b/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands-non-ptr-intrinsics.ll @@ -162,12 +162,13 @@ define float @mubuf_vgpr(<4 x i32> %i, i32 %c) #0 { ; W64-O0-NEXT: v_mov_b32_e32 v3, v6 ; W64-O0-NEXT: v_mov_b32_e32 v4, v5 ; W64-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; W64-O0-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7 ; W64-O0-NEXT: s_mov_b32 s4, 0 +; W64-O0-NEXT: s_waitcnt vmcnt(4) ; W64-O0-NEXT: v_writelane_b32 v0, s4, 0 ; W64-O0-NEXT: s_mov_b64 s[4:5], exec ; W64-O0-NEXT: v_writelane_b32 v0, s4, 1 @@ -183,13 +184,16 @@ define float @mubuf_vgpr(<4 x i32> %i, i32 %c) #0 { ; W64-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_waitcnt vmcnt(3) ; W64-O0-NEXT: v_readfirstlane_b32 s8, v1 +; W64-O0-NEXT: s_waitcnt vmcnt(2) ; W64-O0-NEXT: v_readfirstlane_b32 s12, v2 ; W64-O0-NEXT: s_mov_b32 s4, s8 ; W64-O0-NEXT: s_mov_b32 s5, s12 ; W64-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], s[4:5], v[1:2] +; W64-O0-NEXT: s_waitcnt vmcnt(1) ; W64-O0-NEXT: v_readfirstlane_b32 s7, v3 +; W64-O0-NEXT: s_waitcnt vmcnt(0) ; W64-O0-NEXT: v_readfirstlane_b32 s6, v4 ; W64-O0-NEXT: s_mov_b32 s10, s7 ; W64-O0-NEXT: s_mov_b32 s11, s6 @@ -524,7 +528,7 @@ define void @mubuf_vgpr_adjacent_in_block(<4 x i32> %i, <4 x i32> %j, i32 %c, pt ; W64-O0-NEXT: s_waitcnt vmcnt(3) ; W64-O0-NEXT: v_mov_b32_e32 v16, v4 ; W64-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 
0 ; W64-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill @@ -537,7 +541,7 @@ define void @mubuf_vgpr_adjacent_in_block(<4 x i32> %i, <4 x i32> %j, i32 %c, pt ; W64-O0-NEXT: v_mov_b32_e32 v5, v8 ; W64-O0-NEXT: v_mov_b32_e32 v6, v7 ; W64-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill @@ -545,21 +549,24 @@ define void @mubuf_vgpr_adjacent_in_block(<4 x i32> %i, <4 x i32> %j, i32 %c, pt ; W64-O0-NEXT: ; implicit-def: $sgpr4 ; W64-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec ; W64-O0-NEXT: v_mov_b32_e32 v3, v12 +; W64-O0-NEXT: s_waitcnt vmcnt(10) ; W64-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; W64-O0-NEXT: ; implicit-def: $sgpr4 ; W64-O0-NEXT: ; implicit-def: $sgpr4 ; W64-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec ; W64-O0-NEXT: v_mov_b32_e32 v2, v10 +; W64-O0-NEXT: s_waitcnt vmcnt(11) ; W64-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; W64-O0-NEXT: ; implicit-def: $sgpr4_sgpr5 ; W64-O0-NEXT: ; implicit-def: $sgpr4_sgpr5 ; W64-O0-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7 ; W64-O0-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7 ; W64-O0-NEXT: s_mov_b32 s4, 0 +; W64-O0-NEXT: s_waitcnt vmcnt(12) ; W64-O0-NEXT: v_writelane_b32 v0, s4, 0 ; W64-O0-NEXT: s_mov_b64 s[4:5], exec ; W64-O0-NEXT: v_writelane_b32 v0, s4, 1 @@ -575,13 +582,16 @@ define void @mubuf_vgpr_adjacent_in_block(<4 x i32> %i, <4 x i32> %j, i32 %c, pt ; W64-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_waitcnt vmcnt(3) ; W64-O0-NEXT: v_readfirstlane_b32 s8, v1 +; W64-O0-NEXT: s_waitcnt vmcnt(2) ; W64-O0-NEXT: v_readfirstlane_b32 s12, v2 ; W64-O0-NEXT: s_mov_b32 s4, s8 ; W64-O0-NEXT: s_mov_b32 s5, s12 ; W64-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], s[4:5], v[1:2] +; W64-O0-NEXT: s_waitcnt vmcnt(1) ; W64-O0-NEXT: v_readfirstlane_b32 s7, v3 +; W64-O0-NEXT: s_waitcnt vmcnt(0) ; W64-O0-NEXT: v_readfirstlane_b32 s6, v4 ; W64-O0-NEXT: s_mov_b32 s10, s7 ; W64-O0-NEXT: s_mov_b32 s11, s6 @@ -643,13 +653,16 @@ define void @mubuf_vgpr_adjacent_in_block(<4 x i32> %i, <4 x i32> %j, i32 %c, pt ; W64-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_waitcnt vmcnt(3) ; 
W64-O0-NEXT: v_readfirstlane_b32 s8, v1 +; W64-O0-NEXT: s_waitcnt vmcnt(2) ; W64-O0-NEXT: v_readfirstlane_b32 s12, v2 ; W64-O0-NEXT: s_mov_b32 s4, s8 ; W64-O0-NEXT: s_mov_b32 s5, s12 ; W64-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], s[4:5], v[1:2] +; W64-O0-NEXT: s_waitcnt vmcnt(1) ; W64-O0-NEXT: v_readfirstlane_b32 s7, v3 +; W64-O0-NEXT: s_waitcnt vmcnt(0) ; W64-O0-NEXT: v_readfirstlane_b32 s6, v4 ; W64-O0-NEXT: s_mov_b32 s10, s7 ; W64-O0-NEXT: s_mov_b32 s11, s6 @@ -1055,7 +1068,7 @@ define void @mubuf_vgpr_outside_entry(<4 x i32> %i, <4 x i32> %j, i32 %c, ptr ad ; W64-O0-NEXT: v_mov_b32_e32 v8, v11 ; W64-O0-NEXT: v_mov_b32_e32 v9, v10 ; W64-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill @@ -1063,11 +1076,13 @@ define void @mubuf_vgpr_outside_entry(<4 x i32> %i, <4 x i32> %j, i32 %c, ptr ad ; W64-O0-NEXT: ; implicit-def: $sgpr4 ; W64-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec ; W64-O0-NEXT: v_mov_b32_e32 v6, v12 +; W64-O0-NEXT: s_waitcnt vmcnt(7) ; W64-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; W64-O0-NEXT: s_waitcnt vmcnt(7) ; W64-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill @@ -1076,6 +1091,7 @@ define void @mubuf_vgpr_outside_entry(<4 x i32> %i, <4 x i32> %j, i32 %c, ptr ad ; W64-O0-NEXT: s_mov_b32 s4, 17 ; W64-O0-NEXT: ;;#ASMEND ; W64-O0-NEXT: s_mov_b32 s5, s4 +; W64-O0-NEXT: s_waitcnt vmcnt(10) ; W64-O0-NEXT: v_writelane_b32 v0, s5, 0 ; W64-O0-NEXT: s_mov_b32 s5, 0 ; W64-O0-NEXT: v_writelane_b32 v0, s5, 1 @@ -1095,13 +1111,16 @@ define void @mubuf_vgpr_outside_entry(<4 x i32> %i, <4 x i32> %j, i32 %c, ptr ad ; W64-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_waitcnt vmcnt(3) ; W64-O0-NEXT: v_readfirstlane_b32 s8, v1 +; W64-O0-NEXT: s_waitcnt vmcnt(2) ; W64-O0-NEXT: v_readfirstlane_b32 s12, v2 ; W64-O0-NEXT: s_mov_b32 s4, s8 ; W64-O0-NEXT: s_mov_b32 s5, s12 ; W64-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], s[4:5], v[1:2] +; W64-O0-NEXT: s_waitcnt vmcnt(1) ; W64-O0-NEXT: v_readfirstlane_b32 s7, v3 +; W64-O0-NEXT: s_waitcnt vmcnt(0) ; W64-O0-NEXT: v_readfirstlane_b32 s6, v4 ; W64-O0-NEXT: s_mov_b32 s10, s7 ; W64-O0-NEXT: s_mov_b32 s11, s6 @@ -1191,13 +1210,16 @@ define void @mubuf_vgpr_outside_entry(<4 x i32> %i, <4 x i32> %j, i32 %c, ptr ad ; W64-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload ; W64-O0-NEXT: 
buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_waitcnt vmcnt(3) ; W64-O0-NEXT: v_readfirstlane_b32 s8, v1 +; W64-O0-NEXT: s_waitcnt vmcnt(2) ; W64-O0-NEXT: v_readfirstlane_b32 s12, v2 ; W64-O0-NEXT: s_mov_b32 s4, s8 ; W64-O0-NEXT: s_mov_b32 s5, s12 ; W64-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], s[4:5], v[1:2] +; W64-O0-NEXT: s_waitcnt vmcnt(1) ; W64-O0-NEXT: v_readfirstlane_b32 s7, v3 +; W64-O0-NEXT: s_waitcnt vmcnt(0) ; W64-O0-NEXT: v_readfirstlane_b32 s6, v4 ; W64-O0-NEXT: s_mov_b32 s10, s7 ; W64-O0-NEXT: s_mov_b32 s11, s6 diff --git a/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll b/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll index 2591ff4..42ed4c1 100644 --- a/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll +++ b/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll @@ -175,13 +175,14 @@ define float @mubuf_vgpr(ptr addrspace(8) %i, i32 %c) #0 { ; W64-O0-NEXT: v_mov_b32_e32 v3, v6 ; W64-O0-NEXT: v_mov_b32_e32 v4, v5 ; W64-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; W64-O0-NEXT: ; implicit-def: $sgpr4_sgpr5 ; W64-O0-NEXT: ; implicit-def: $sgpr4_sgpr5 ; W64-O0-NEXT: s_mov_b32 s4, 0 +; W64-O0-NEXT: s_waitcnt vmcnt(4) ; W64-O0-NEXT: v_writelane_b32 v0, s4, 0 ; W64-O0-NEXT: s_mov_b64 s[4:5], exec ; W64-O0-NEXT: v_writelane_b32 v0, s4, 1 @@ -197,13 +198,16 @@ define float @mubuf_vgpr(ptr addrspace(8) %i, i32 %c) #0 { ; W64-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_waitcnt vmcnt(3) ; W64-O0-NEXT: v_readfirstlane_b32 s8, v1 +; W64-O0-NEXT: s_waitcnt vmcnt(2) ; W64-O0-NEXT: v_readfirstlane_b32 s12, v2 ; W64-O0-NEXT: s_mov_b32 s4, s8 ; W64-O0-NEXT: s_mov_b32 s5, s12 ; W64-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], s[4:5], v[1:2] +; W64-O0-NEXT: s_waitcnt vmcnt(1) ; W64-O0-NEXT: v_readfirstlane_b32 s7, v3 +; W64-O0-NEXT: s_waitcnt vmcnt(0) ; W64-O0-NEXT: v_readfirstlane_b32 s6, v4 ; W64-O0-NEXT: s_mov_b32 s10, s7 ; W64-O0-NEXT: s_mov_b32 s11, s6 @@ -549,7 +553,7 @@ define void @mubuf_vgpr_adjacent_in_block(ptr addrspace(8) %i, ptr addrspace(8) ; W64-O0-NEXT: v_mov_b32_e32 v15, v7 ; W64-O0-NEXT: v_mov_b32_e32 v16, v6 ; W64-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill @@ -574,7 +578,7 @@ define void @mubuf_vgpr_adjacent_in_block(ptr addrspace(8) %i, ptr addrspace(8) ; W64-O0-NEXT: v_mov_b32_e32 v5, v8 ; W64-O0-NEXT: v_mov_b32_e32 v6, v7 ; W64-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:24 ; 4-byte Folded 
Spill ; W64-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill @@ -582,15 +586,17 @@ define void @mubuf_vgpr_adjacent_in_block(ptr addrspace(8) %i, ptr addrspace(8) ; W64-O0-NEXT: ; implicit-def: $sgpr4 ; W64-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec ; W64-O0-NEXT: v_mov_b32_e32 v3, v12 +; W64-O0-NEXT: s_waitcnt vmcnt(10) ; W64-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; W64-O0-NEXT: ; implicit-def: $sgpr4 ; W64-O0-NEXT: ; implicit-def: $sgpr4 ; W64-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec ; W64-O0-NEXT: v_mov_b32_e32 v2, v10 +; W64-O0-NEXT: s_waitcnt vmcnt(11) ; W64-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; W64-O0-NEXT: ; implicit-def: $sgpr4_sgpr5 ; W64-O0-NEXT: ; implicit-def: $sgpr4_sgpr5 @@ -599,6 +605,7 @@ define void @mubuf_vgpr_adjacent_in_block(ptr addrspace(8) %i, ptr addrspace(8) ; W64-O0-NEXT: ; implicit-def: $sgpr4_sgpr5 ; W64-O0-NEXT: ; implicit-def: $sgpr4_sgpr5 ; W64-O0-NEXT: s_mov_b32 s4, 0 +; W64-O0-NEXT: s_waitcnt vmcnt(12) ; W64-O0-NEXT: v_writelane_b32 v0, s4, 0 ; W64-O0-NEXT: s_mov_b64 s[4:5], exec ; W64-O0-NEXT: v_writelane_b32 v0, s4, 1 @@ -614,13 +621,16 @@ define void @mubuf_vgpr_adjacent_in_block(ptr addrspace(8) %i, ptr addrspace(8) ; W64-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_waitcnt vmcnt(3) ; W64-O0-NEXT: v_readfirstlane_b32 s8, v1 +; W64-O0-NEXT: s_waitcnt vmcnt(2) ; W64-O0-NEXT: v_readfirstlane_b32 s12, v2 ; W64-O0-NEXT: s_mov_b32 s4, s8 ; W64-O0-NEXT: s_mov_b32 s5, s12 ; W64-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], s[4:5], v[1:2] +; W64-O0-NEXT: s_waitcnt vmcnt(1) ; W64-O0-NEXT: v_readfirstlane_b32 s7, v3 +; W64-O0-NEXT: s_waitcnt vmcnt(0) ; W64-O0-NEXT: v_readfirstlane_b32 s6, v4 ; W64-O0-NEXT: s_mov_b32 s10, s7 ; W64-O0-NEXT: s_mov_b32 s11, s6 @@ -682,13 +692,16 @@ define void @mubuf_vgpr_adjacent_in_block(ptr addrspace(8) %i, ptr addrspace(8) ; W64-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_waitcnt vmcnt(3) ; W64-O0-NEXT: v_readfirstlane_b32 s8, v1 +; W64-O0-NEXT: s_waitcnt vmcnt(2) ; W64-O0-NEXT: v_readfirstlane_b32 s12, v2 ; W64-O0-NEXT: s_mov_b32 s4, s8 ; W64-O0-NEXT: s_mov_b32 s5, s12 ; W64-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], s[4:5], v[1:2] +; W64-O0-NEXT: s_waitcnt vmcnt(1) ; W64-O0-NEXT: v_readfirstlane_b32 s7, v3 +; W64-O0-NEXT: s_waitcnt vmcnt(0) ; W64-O0-NEXT: v_readfirstlane_b32 s6, v4 ; W64-O0-NEXT: s_mov_b32 s10, s7 ; W64-O0-NEXT: s_mov_b32 s11, s6 @@ -1101,7 +1114,7 @@ define void @mubuf_vgpr_outside_entry(ptr addrspace(8) %i, ptr addrspace(8) %j, ; W64-O0-NEXT: v_mov_b32_e32 v10, v6 ; 
W64-O0-NEXT: v_mov_b32_e32 v11, v4 ; W64-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill @@ -1112,26 +1125,29 @@ define void @mubuf_vgpr_outside_entry(ptr addrspace(8) %i, ptr addrspace(8) %j, ; W64-O0-NEXT: ; implicit-def: $sgpr4 ; W64-O0-NEXT: ; implicit-def: $sgpr4 ; W64-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec +; W64-O0-NEXT: s_waitcnt vmcnt(6) ; W64-O0-NEXT: v_mov_b32_e32 v4, v2 ; W64-O0-NEXT: ; implicit-def: $sgpr4 ; W64-O0-NEXT: ; implicit-def: $sgpr4 ; W64-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec ; W64-O0-NEXT: v_mov_b32_e32 v2, v12 ; W64-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill ; W64-O0-NEXT: ; implicit-def: $sgpr4_sgpr5 ; W64-O0-NEXT: ; implicit-def: $sgpr4_sgpr5 +; W64-O0-NEXT: s_waitcnt vmcnt(9) ; W64-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill ; W64-O0-NEXT: ;;#ASMSTART ; W64-O0-NEXT: s_mov_b32 s4, 17 ; W64-O0-NEXT: ;;#ASMEND ; W64-O0-NEXT: s_mov_b32 s5, s4 +; W64-O0-NEXT: s_waitcnt vmcnt(10) ; W64-O0-NEXT: v_writelane_b32 v0, s5, 0 ; W64-O0-NEXT: s_mov_b32 s5, 0 ; W64-O0-NEXT: v_writelane_b32 v0, s5, 1 @@ -1151,13 +1167,16 @@ define void @mubuf_vgpr_outside_entry(ptr addrspace(8) %i, ptr addrspace(8) %j, ; W64-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_waitcnt vmcnt(3) ; W64-O0-NEXT: v_readfirstlane_b32 s8, v1 +; W64-O0-NEXT: s_waitcnt vmcnt(2) ; W64-O0-NEXT: v_readfirstlane_b32 s12, v2 ; W64-O0-NEXT: s_mov_b32 s4, s8 ; W64-O0-NEXT: s_mov_b32 s5, s12 ; W64-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], s[4:5], v[1:2] +; W64-O0-NEXT: s_waitcnt vmcnt(1) ; W64-O0-NEXT: v_readfirstlane_b32 s7, v3 +; W64-O0-NEXT: s_waitcnt vmcnt(0) ; W64-O0-NEXT: v_readfirstlane_b32 s6, v4 ; W64-O0-NEXT: s_mov_b32 s10, s7 ; W64-O0-NEXT: s_mov_b32 s11, s6 @@ -1246,7 +1265,7 @@ define void @mubuf_vgpr_outside_entry(ptr addrspace(8) %i, ptr addrspace(8) %j, ; W64-O0-NEXT: v_mov_b32_e32 v3, v6 ; W64-O0-NEXT: v_mov_b32_e32 v4, v5 ; W64-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_nop 0 ; W64-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; W64-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill @@ -1268,13 +1287,16 @@ 
define void @mubuf_vgpr_outside_entry(ptr addrspace(8) %i, ptr addrspace(8) %j, ; W64-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload ; W64-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; W64-O0-NEXT: s_waitcnt vmcnt(0) +; W64-O0-NEXT: s_waitcnt vmcnt(3) ; W64-O0-NEXT: v_readfirstlane_b32 s8, v1 +; W64-O0-NEXT: s_waitcnt vmcnt(2) ; W64-O0-NEXT: v_readfirstlane_b32 s12, v2 ; W64-O0-NEXT: s_mov_b32 s4, s8 ; W64-O0-NEXT: s_mov_b32 s5, s12 ; W64-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], s[4:5], v[1:2] +; W64-O0-NEXT: s_waitcnt vmcnt(1) ; W64-O0-NEXT: v_readfirstlane_b32 s7, v3 +; W64-O0-NEXT: s_waitcnt vmcnt(0) ; W64-O0-NEXT: v_readfirstlane_b32 s6, v4 ; W64-O0-NEXT: s_mov_b32 s10, s7 ; W64-O0-NEXT: s_mov_b32 s11, s6 diff --git a/llvm/test/CodeGen/AMDGPU/preserve-wwm-copy-dst-reg.ll b/llvm/test/CodeGen/AMDGPU/preserve-wwm-copy-dst-reg.ll index fbe34a3..bbeb2e1 100644 --- a/llvm/test/CodeGen/AMDGPU/preserve-wwm-copy-dst-reg.ll +++ b/llvm/test/CodeGen/AMDGPU/preserve-wwm-copy-dst-reg.ll @@ -44,7 +44,7 @@ define void @preserve_wwm_copy_dstreg(ptr %parg0, ptr %parg1, ptr %parg2) #0 { ; GFX906-NEXT: v_writelane_b32 v41, s31, 1 ; GFX906-NEXT: v_mov_b32_e32 v32, v31 ; GFX906-NEXT: buffer_store_dword v0, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill -; GFX906-NEXT: s_waitcnt vmcnt(0) +; GFX906-NEXT: s_nop 0 ; GFX906-NEXT: buffer_store_dword v1, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill ; GFX906-NEXT: v_writelane_b32 v2, s5, 11 ; GFX906-NEXT: s_or_saveexec_b64 s[34:35], -1 @@ -54,7 +54,7 @@ define void @preserve_wwm_copy_dstreg(ptr %parg0, ptr %parg1, ptr %parg2) #0 { ; GFX906-NEXT: ; def v[0:31] ; GFX906-NEXT: ;;#ASMEND ; GFX906-NEXT: buffer_store_dword v0, off, s[0:3], s33 offset:16 ; 4-byte Folded Spill -; GFX906-NEXT: s_waitcnt vmcnt(0) +; GFX906-NEXT: s_nop 0 ; GFX906-NEXT: buffer_store_dword v1, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill ; GFX906-NEXT: buffer_store_dword v2, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill ; GFX906-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill @@ -425,7 +425,7 @@ define void @preserve_wwm_copy_dstreg(ptr %parg0, ptr %parg1, ptr %parg2) #0 { ; GFX908-NEXT: v_writelane_b32 v2, s4, 10 ; GFX908-NEXT: v_mov_b32_e32 v32, v31 ; GFX908-NEXT: buffer_store_dword v0, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill -; GFX908-NEXT: s_waitcnt vmcnt(0) +; GFX908-NEXT: s_nop 0 ; GFX908-NEXT: buffer_store_dword v1, off, s[0:3], s33 offset:16 ; 4-byte Folded Spill ; GFX908-NEXT: v_writelane_b32 v2, s5, 11 ; GFX908-NEXT: s_or_saveexec_b64 s[34:35], -1 @@ -435,7 +435,7 @@ define void @preserve_wwm_copy_dstreg(ptr %parg0, ptr %parg1, ptr %parg2) #0 { ; GFX908-NEXT: ; def v[0:31] ; GFX908-NEXT: ;;#ASMEND ; GFX908-NEXT: buffer_store_dword v0, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill -; GFX908-NEXT: s_waitcnt vmcnt(0) +; GFX908-NEXT: s_nop 0 ; GFX908-NEXT: buffer_store_dword v1, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill ; GFX908-NEXT: buffer_store_dword v2, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill ; GFX908-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill diff --git a/llvm/test/CodeGen/AMDGPU/rem_i128.ll b/llvm/test/CodeGen/AMDGPU/rem_i128.ll index fe093d4..fd6e06a 100644 --- a/llvm/test/CodeGen/AMDGPU/rem_i128.ll +++ b/llvm/test/CodeGen/AMDGPU/rem_i128.ll @@ -282,16 +282,17 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: 
v_mov_b32_e32 v4, v13 ; GFX9-O0-NEXT: v_ashrrev_i64 v[3:4], s4, v[3:4] ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2 ; GFX9-O0-NEXT: v_mov_b32_e32 v1, v12 ; GFX9-O0-NEXT: v_mov_b32_e32 v2, v13 ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0 +; GFX9-O0-NEXT: s_waitcnt vmcnt(4) ; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 0 ; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 1 ; GFX9-O0-NEXT: s_mov_b32 s10, s6 @@ -371,42 +372,42 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v14, v6 ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v5 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v16 ; GFX9-O0-NEXT: v_mov_b32_e32 v14, v17 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v14, v12 ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v18 ; GFX9-O0-NEXT: v_mov_b32_e32 v14, v19 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v14, v12 ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v18 ; GFX9-O0-NEXT: v_mov_b32_e32 v14, v19 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v14, v6 ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v5 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v16 ; GFX9-O0-NEXT: v_mov_b32_e32 v14, v17 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v14, v12 ; GFX9-O0-NEXT: 
v_mov_b32_e32 v13, v19 @@ -521,14 +522,14 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v9 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7 ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], s[6:7] ; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f @@ -576,10 +577,10 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v5 ; GFX9-O0-NEXT: s_and_b64 s[6:7], s[4:5], s[6:7] ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 s[4:5], exec ; GFX9-O0-NEXT: v_writelane_b32 v0, s4, 4 @@ -610,16 +611,19 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_waitcnt vmcnt(6) ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_waitcnt vmcnt(6) ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_waitcnt vmcnt(6) ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_waitcnt vmcnt(6) ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB0_5 ; GFX9-O0-NEXT: .LBB0_3: ; %Flow2 @@ -636,10 +640,10 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; 
GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB0_9 ; GFX9-O0-NEXT: .LBB0_4: ; %udiv-loop-exit @@ -676,10 +680,10 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v4 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB0_3 ; GFX9-O0-NEXT: .LBB0_5: ; %Flow1 @@ -700,16 +704,16 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB0_4 ; GFX9-O0-NEXT: .LBB0_6: ; %udiv-do-while @@ -794,11 +798,12 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v23 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24 -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_waitcnt vmcnt(1) ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11 ; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14 ; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15 ; GFX9-O0-NEXT: v_sub_co_u32_e32 v13, vcc, v13, v6 +; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: v_subb_co_u32_e32 v12, vcc, v12, v10, vcc ; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v11, v4, vcc ; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v5, vcc @@ -884,22 +889,22 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v3 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v2 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v1 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v0 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 
v18, v15 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v14 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v13 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5] ; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 6 @@ -911,28 +916,28 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 exec, s[22:23] ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX9-O0-NEXT: s_cbranch_execnz .LBB0_6 @@ -957,7 +962,7 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload ; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload ; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; GFX9-O0-NEXT: s_waitcnt vmcnt(9) +; GFX9-O0-NEXT: s_waitcnt vmcnt(10) ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10 ; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], v4, v[21:22] @@ -1030,10 +1035,10 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v17 ; 
GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7] ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[6:7] ; GFX9-O0-NEXT: v_mov_b32_e32 v15, s9 @@ -1046,28 +1051,28 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 exec, s[22:23] ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB0_6 ; GFX9-O0-NEXT: .LBB0_8: ; %udiv-bb1 @@ -1084,7 +1089,7 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1 ; GFX9-O0-NEXT: s_mov_b32 s5, s6 -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_waitcnt vmcnt(1) ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1 ; GFX9-O0-NEXT: s_mov_b32 s4, s7 ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0 @@ -1095,6 +1100,7 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5 ; GFX9-O0-NEXT: v_add_co_u32_e32 v9, vcc, v4, v5 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, s4 +; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v5, vcc ; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8 ; GFX9-O0-NEXT: v_addc_co_u32_e32 
v1, vcc, v1, v5, vcc @@ -1111,12 +1117,12 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9 ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b32 s4, 0x7f ; GFX9-O0-NEXT: v_sub_u32_e64 v3, s4, v4 @@ -1164,10 +1170,10 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v8, v3 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2 ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v10 @@ -1184,16 +1190,16 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6 ; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 s[6:7], exec ; GFX9-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5] @@ -1230,9 +1236,10 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: s_waitcnt vmcnt(2) ; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[5:6] ; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0 -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_waitcnt vmcnt(1) ; GFX9-O0-NEXT: v_mov_b32_e32 v0, v17 ; GFX9-O0-NEXT: v_mul_lo_u32 v3, v1, v0 +; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: v_lshrrev_b64 v[17:18], s4, v[17:18] ; GFX9-O0-NEXT: v_mov_b32_e32 v2, v17 ; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec @@ -1756,42 +1763,43 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; GFX9-O0-NEXT: 
s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_waitcnt vmcnt(7) ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2 ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2 ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v7, v12 ; GFX9-O0-NEXT: v_mov_b32_e32 v8, v2 @@ -1802,6 +1810,7 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3 ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0 +; GFX9-O0-NEXT: s_waitcnt vmcnt(16) ; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 0 ; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 1 ; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[1:2], s[6:7] @@ -1911,14 +1920,14 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v9 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7 ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; 
GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], s[6:7] ; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f @@ -1966,10 +1975,10 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v5 ; GFX9-O0-NEXT: s_and_b64 s[6:7], s[4:5], s[6:7] ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 s[4:5], exec ; GFX9-O0-NEXT: v_writelane_b32 v0, s4, 2 @@ -2000,16 +2009,19 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_waitcnt vmcnt(6) ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_waitcnt vmcnt(6) ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_waitcnt vmcnt(6) ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_waitcnt vmcnt(6) ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB1_5 ; GFX9-O0-NEXT: .LBB1_3: ; %Flow2 @@ -2026,10 +2038,10 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB1_9 ; GFX9-O0-NEXT: .LBB1_4: ; %udiv-loop-exit @@ -2066,10 +2078,10 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v4 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded 
Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB1_3 ; GFX9-O0-NEXT: .LBB1_5: ; %Flow1 @@ -2090,16 +2102,16 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB1_4 ; GFX9-O0-NEXT: .LBB1_6: ; %udiv-do-while @@ -2184,11 +2196,12 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v23 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24 -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_waitcnt vmcnt(1) ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11 ; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14 ; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15 ; GFX9-O0-NEXT: v_sub_co_u32_e32 v13, vcc, v13, v6 +; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: v_subb_co_u32_e32 v12, vcc, v12, v10, vcc ; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v11, v4, vcc ; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v5, vcc @@ -2274,22 +2287,22 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v3 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v2 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v1 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v0 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v14 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v18, v13 ; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12 ; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5] ; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 4 @@ -2301,28 +2314,28 @@ define i128 
@v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX9-O0-NEXT: s_cbranch_execnz .LBB1_6 @@ -2347,7 +2360,7 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload ; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload ; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; GFX9-O0-NEXT: s_waitcnt vmcnt(9) +; GFX9-O0-NEXT: s_waitcnt vmcnt(10) ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10 ; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], v4, v[21:22] @@ -2420,10 +2433,10 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v13, v17 ; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7] ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[6:7] ; GFX9-O0-NEXT: v_mov_b32_e32 v15, s9 @@ -2436,28 +2449,28 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 
; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_branch .LBB1_6 ; GFX9-O0-NEXT: .LBB1_8: ; %udiv-bb1 @@ -2474,7 +2487,7 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1 ; GFX9-O0-NEXT: s_mov_b32 s5, s6 -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_waitcnt vmcnt(1) ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1 ; GFX9-O0-NEXT: s_mov_b32 s4, s7 ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0 @@ -2485,6 +2498,7 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5 ; GFX9-O0-NEXT: v_add_co_u32_e32 v9, vcc, v4, v5 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, s4 +; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v5, vcc ; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8 ; GFX9-O0-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc @@ -2501,12 +2515,12 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9 ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10 ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill ; 
GFX9-O0-NEXT: s_mov_b32 s4, 0x7f ; GFX9-O0-NEXT: v_sub_u32_e64 v3, s4, v4 @@ -2554,10 +2568,10 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v8, v3 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2 ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v10 @@ -2574,16 +2588,16 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6 ; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7 ; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_nop 0 ; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_mov_b64 s[6:7], exec ; GFX9-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5] @@ -2616,9 +2630,10 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: s_waitcnt vmcnt(2) ; GFX9-O0-NEXT: v_lshrrev_b64 v[2:3], s4, v[7:8] ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2 -; GFX9-O0-NEXT: s_waitcnt vmcnt(0) +; GFX9-O0-NEXT: s_waitcnt vmcnt(1) ; GFX9-O0-NEXT: v_mov_b32_e32 v2, v13 ; GFX9-O0-NEXT: v_mul_lo_u32 v5, v6, v2 +; GFX9-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-O0-NEXT: v_lshrrev_b64 v[13:14], s4, v[13:14] ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13 ; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr7_vgpr8 killed $exec diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll b/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll index f523b4a..b2f5b6a 100644 --- a/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll +++ b/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll @@ -1316,7 +1316,6 @@ define void @spill_sgpr_no_free_vgpr(ptr addrspace(1) %out, ptr addrspace(1) %in ; GCN-NEXT: flat_load_dwordx4 v[6:9], v[2:3] ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill diff --git a/llvm/test/CodeGen/AMDGPU/spill-offset-calculation.ll b/llvm/test/CodeGen/AMDGPU/spill-offset-calculation.ll index 55238b28..daf0a2d 100644 --- 
a/llvm/test/CodeGen/AMDGPU/spill-offset-calculation.ll +++ b/llvm/test/CodeGen/AMDGPU/spill-offset-calculation.ll @@ -282,7 +282,7 @@ define amdgpu_kernel void @test_sgpr_offset_subregs_kernel() { ; MUBUF-NEXT: buffer_load_dword v1, off, s[0:3], 0 offset:12 glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) ; MUBUF-NEXT: buffer_store_dword v0, off, s[0:3], 0 offset:4084 ; 4-byte Folded Spill -; MUBUF-NEXT: s_waitcnt vmcnt(0) +; MUBUF-NEXT: s_nop 0 ; MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:4088 ; 4-byte Folded Spill ; MUBUF-NEXT: ;;#ASMSTART ; MUBUF-NEXT: ;;#ASMEND @@ -349,7 +349,7 @@ define amdgpu_kernel void @test_inst_offset_subregs_kernel() { ; MUBUF-NEXT: s_waitcnt vmcnt(0) ; MUBUF-NEXT: s_mov_b32 s4, 0x3ff00 ; MUBUF-NEXT: buffer_store_dword v0, off, s[0:3], s4 ; 4-byte Folded Spill -; MUBUF-NEXT: s_waitcnt vmcnt(0) +; MUBUF-NEXT: s_nop 0 ; MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], s4 offset:4 ; 4-byte Folded Spill ; MUBUF-NEXT: ;;#ASMSTART ; MUBUF-NEXT: ;;#ASMEND @@ -515,7 +515,7 @@ define void @test_sgpr_offset_subregs_function() { ; MUBUF-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:12 glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) ; MUBUF-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4084 ; 4-byte Folded Spill -; MUBUF-NEXT: s_waitcnt vmcnt(0) +; MUBUF-NEXT: s_nop 0 ; MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4088 ; 4-byte Folded Spill ; MUBUF-NEXT: ;;#ASMSTART ; MUBUF-NEXT: ;;#ASMEND @@ -580,7 +580,7 @@ define void @test_inst_offset_subregs_function() { ; MUBUF-NEXT: s_waitcnt vmcnt(0) ; MUBUF-NEXT: s_add_i32 s4, s32, 0x3ff00 ; MUBUF-NEXT: buffer_store_dword v0, off, s[0:3], s4 ; 4-byte Folded Spill -; MUBUF-NEXT: s_waitcnt vmcnt(0) +; MUBUF-NEXT: s_nop 0 ; MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], s4 offset:4 ; 4-byte Folded Spill ; MUBUF-NEXT: ;;#ASMSTART ; MUBUF-NEXT: ;;#ASMEND diff --git a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll index b9ad461..0e68c30 100644 --- a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll +++ b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll @@ -53,7 +53,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: s_mov_b32 s33, 0x4f900 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:4 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:8 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:12 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:16 ; 4-byte Folded Spill @@ -61,7 +60,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:16 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:20 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:24 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:28 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:32 ; 4-byte Folded Spill @@ -69,7 +67,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:32 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:36 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; 
GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:40 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:44 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:48 ; 4-byte Folded Spill @@ -77,7 +74,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:48 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:52 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:56 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:60 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:64 ; 4-byte Folded Spill @@ -85,7 +81,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:64 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:68 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:72 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:76 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:80 ; 4-byte Folded Spill @@ -93,7 +88,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:80 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:84 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:88 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:92 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:96 ; 4-byte Folded Spill @@ -101,7 +95,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:96 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:100 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:104 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:108 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:112 ; 4-byte Folded Spill @@ -109,7 +102,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:112 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:116 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:120 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:124 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:128 ; 4-byte Folded Spill @@ -117,7 +109,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:128 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:132 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:136 ; 4-byte 
Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:140 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:144 ; 4-byte Folded Spill @@ -125,7 +116,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:144 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:148 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:152 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:156 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:160 ; 4-byte Folded Spill @@ -133,7 +123,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:160 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:164 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:168 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:172 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:176 ; 4-byte Folded Spill @@ -141,7 +130,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:176 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:180 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:184 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:188 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:192 ; 4-byte Folded Spill @@ -149,7 +137,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:192 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:196 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:200 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:204 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:208 ; 4-byte Folded Spill @@ -157,7 +144,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:208 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:212 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:216 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:220 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:224 ; 4-byte Folded Spill @@ -165,7 +151,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:224 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:228 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:232 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword 
v2, off, s[40:43], 0 offset:236 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:240 ; 4-byte Folded Spill @@ -173,7 +158,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:240 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:244 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:248 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:252 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:256 ; 4-byte Folded Spill @@ -181,7 +165,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:256 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:260 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:264 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:268 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:272 ; 4-byte Folded Spill @@ -189,7 +172,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:272 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:276 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:280 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:284 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:288 ; 4-byte Folded Spill @@ -197,7 +179,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:288 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:292 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:296 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:300 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:304 ; 4-byte Folded Spill @@ -205,7 +186,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:304 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:308 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:312 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:316 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:320 ; 4-byte Folded Spill @@ -213,7 +193,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:320 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:324 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:328 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:332 ; 4-byte 
Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:336 ; 4-byte Folded Spill @@ -221,7 +200,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:336 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:340 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:344 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:348 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:352 ; 4-byte Folded Spill @@ -229,7 +207,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:352 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:356 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:360 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:364 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:368 ; 4-byte Folded Spill @@ -237,7 +214,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:368 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:372 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:376 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:380 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:384 ; 4-byte Folded Spill @@ -245,7 +221,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:384 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:388 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:392 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:396 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:400 ; 4-byte Folded Spill @@ -253,7 +228,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:400 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:404 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:408 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:412 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:416 ; 4-byte Folded Spill @@ -261,7 +235,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:416 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:420 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:424 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:428 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword 
v3, off, s[40:43], 0 offset:432 ; 4-byte Folded Spill @@ -269,7 +242,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:432 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:436 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:440 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:444 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:448 ; 4-byte Folded Spill @@ -277,7 +249,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:448 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:452 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:456 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:460 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:464 ; 4-byte Folded Spill @@ -285,7 +256,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:464 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:468 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:472 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:476 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:480 ; 4-byte Folded Spill @@ -293,7 +263,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:480 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:484 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:488 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:492 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:496 ; 4-byte Folded Spill @@ -301,7 +270,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:496 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:500 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:504 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:508 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:512 ; 4-byte Folded Spill @@ -309,7 +277,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:512 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:516 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:520 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:524 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:528 ; 4-byte 
Folded Spill
@@ -317,7 +284,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:528
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:532 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:536 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:540 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:544 ; 4-byte Folded Spill
@@ -325,7 +291,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:544
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:548 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:552 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:556 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:560 ; 4-byte Folded Spill
@@ -333,7 +298,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:560
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:564 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:568 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:572 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:576 ; 4-byte Folded Spill
@@ -341,7 +305,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:576
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:580 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:584 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:588 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:592 ; 4-byte Folded Spill
@@ -349,7 +312,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:592
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:596 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:600 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:604 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:608 ; 4-byte Folded Spill
@@ -357,7 +319,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:608
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:612 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:616 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:620 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:624 ; 4-byte Folded Spill
@@ -365,7 +326,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:624
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:628 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:632 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:636 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:640 ; 4-byte Folded Spill
@@ -373,7 +333,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:640
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:644 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:648 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:652 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:656 ; 4-byte Folded Spill
@@ -381,7 +340,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:656
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:660 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:664 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:668 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:672 ; 4-byte Folded Spill
@@ -389,7 +347,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:672
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:676 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:680 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:684 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:688 ; 4-byte Folded Spill
@@ -397,7 +354,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:688
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:692 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:696 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:700 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:704 ; 4-byte Folded Spill
@@ -405,7 +361,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:704
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:708 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:712 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:716 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:720 ; 4-byte Folded Spill
@@ -413,7 +368,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:720
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:724 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:728 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:732 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:736 ; 4-byte Folded Spill
@@ -421,7 +375,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:736
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:740 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:744 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:748 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:752 ; 4-byte Folded Spill
@@ -429,7 +382,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:752
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:756 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:760 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:764 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:768 ; 4-byte Folded Spill
@@ -437,7 +389,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:768
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:772 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:776 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:780 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:784 ; 4-byte Folded Spill
@@ -445,7 +396,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:784
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:788 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:792 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:796 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:800 ; 4-byte Folded Spill
@@ -453,7 +403,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:800
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:804 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:808 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:812 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:816 ; 4-byte Folded Spill
@@ -461,7 +410,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:816
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:820 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:824 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:828 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:832 ; 4-byte Folded Spill
@@ -469,7 +417,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:832
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:836 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:840 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:844 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:848 ; 4-byte Folded Spill
@@ -477,7 +424,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:848
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:852 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:856 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:860 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:864 ; 4-byte Folded Spill
@@ -485,7 +431,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:864
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:868 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:872 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:876 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:880 ; 4-byte Folded Spill
@@ -493,7 +438,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:880
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:884 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:888 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:892 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:896 ; 4-byte Folded Spill
@@ -501,7 +445,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:896
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:900 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:904 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:908 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:912 ; 4-byte Folded Spill
@@ -509,7 +452,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:912
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:916 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:920 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:924 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:928 ; 4-byte Folded Spill
@@ -517,7 +459,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:928
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:932 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:936 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:940 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:944 ; 4-byte Folded Spill
@@ -525,7 +466,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:944
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:948 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:952 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:956 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:960 ; 4-byte Folded Spill
@@ -533,7 +473,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:960
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:964 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:968 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:972 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:976 ; 4-byte Folded Spill
@@ -541,7 +480,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:976
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:980 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:984 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:988 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:992 ; 4-byte Folded Spill
@@ -549,7 +487,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:992
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:996 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1000 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1004 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1008 ; 4-byte Folded Spill
@@ -557,7 +494,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1008
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1012 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1016 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1020 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1024 ; 4-byte Folded Spill
@@ -565,7 +501,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1024
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1028 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1032 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1036 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1040 ; 4-byte Folded Spill
@@ -573,7 +508,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1040
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1044 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1048 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1052 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1056 ; 4-byte Folded Spill
@@ -581,7 +515,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1056
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1060 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1064 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1068 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1072 ; 4-byte Folded Spill
@@ -589,7 +522,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1072
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1076 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1080 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1084 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1088 ; 4-byte Folded Spill
@@ -597,7 +529,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1088
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1092 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1096 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1100 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1104 ; 4-byte Folded Spill
@@ -605,7 +536,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1104
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1108 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1112 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1116 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1120 ; 4-byte Folded Spill
@@ -613,7 +543,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1120
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1124 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1128 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1132 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1136 ; 4-byte Folded Spill
@@ -621,7 +550,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1136
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1140 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1144 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1148 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1152 ; 4-byte Folded Spill
@@ -629,7 +557,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1152
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1156 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1160 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1164 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1168 ; 4-byte Folded Spill
@@ -637,7 +564,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1168
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1172 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1176 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1180 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1184 ; 4-byte Folded Spill
@@ -645,7 +571,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1184
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1188 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1192 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1196 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1200 ; 4-byte Folded Spill
@@ -653,7 +578,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1200
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1204 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1208 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1212 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1216 ; 4-byte Folded Spill
@@ -661,7 +585,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1216
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1220 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1224 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1228 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1232 ; 4-byte Folded Spill
@@ -669,7 +592,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1232
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1236 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1240 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1244 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1248 ; 4-byte Folded Spill
@@ -677,7 +599,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1248
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1252 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1256 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1260 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1264 ; 4-byte Folded Spill
@@ -685,7 +606,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1264
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1268 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1272 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1276 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1280 ; 4-byte Folded Spill
@@ -693,7 +613,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1280
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1284 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1288 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1292 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1296 ; 4-byte Folded Spill
@@ -701,7 +620,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1296
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1300 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1304 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1308 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1312 ; 4-byte Folded Spill
@@ -709,7 +627,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1312
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1316 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1320 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1324 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1328 ; 4-byte Folded Spill
@@ -717,7 +634,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1328
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1332 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1336 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1340 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1344 ; 4-byte Folded Spill
@@ -725,7 +641,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1344
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1348 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1352 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1356 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1360 ; 4-byte Folded Spill
@@ -733,7 +648,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1360
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1364 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1368 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1372 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1376 ; 4-byte Folded Spill
@@ -741,7 +655,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1376
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1380 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1384 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1388 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1392 ; 4-byte Folded Spill
@@ -749,7 +662,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1392
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1396 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1400 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1404 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1408 ; 4-byte Folded Spill
@@ -757,7 +669,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1408
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1412 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1416 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1420 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1424 ; 4-byte Folded Spill
@@ -765,7 +676,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1424
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1428 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1432 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1436 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1440 ; 4-byte Folded Spill
@@ -773,7 +683,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1440
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1444 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1448 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1452 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1456 ; 4-byte Folded Spill
@@ -781,7 +690,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1456
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1460 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1464 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1468 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1472 ; 4-byte Folded Spill
@@ -789,7 +697,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1472
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1476 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1480 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1484 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1488 ; 4-byte Folded Spill
@@ -797,7 +704,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1488
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1492 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1496 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1500 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1504 ; 4-byte Folded Spill
@@ -805,7 +711,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1504
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1508 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1512 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1516 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1520 ; 4-byte Folded Spill
@@ -813,7 +718,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1520
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1524 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1528 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1532 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1536 ; 4-byte Folded Spill
@@ -821,7 +725,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1536
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1540 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1544 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1548 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1552 ; 4-byte Folded Spill
@@ -829,7 +732,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1552
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1556 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1560 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1564 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1568 ; 4-byte Folded Spill
@@ -837,7 +739,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1568
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1572 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1576 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1580 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1584 ; 4-byte Folded Spill
@@ -845,7 +746,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1584
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1588 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1592 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1596 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1600 ; 4-byte Folded Spill
@@ -853,7 +753,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1600
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1604 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1608 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1612 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1616 ; 4-byte Folded Spill
@@ -861,7 +760,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1616
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1620 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1624 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1628 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1632 ; 4-byte Folded Spill
@@ -869,7 +767,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1632
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1636 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1640 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1644 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1648 ; 4-byte Folded Spill
@@ -877,7 +774,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1648
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1652 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1656 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1660 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1664 ; 4-byte Folded Spill
@@ -885,7 +781,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1664
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1668 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1672 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1676 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1680 ; 4-byte Folded Spill
@@ -893,7 +788,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1680
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1684 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1688 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1692 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1696 ; 4-byte Folded Spill
@@ -901,7 +795,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1696
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1700 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1704 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1708 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1712 ; 4-byte Folded Spill
@@ -909,7 +802,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1712
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1716 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1720 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1724 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1728 ; 4-byte Folded Spill
@@ -917,7 +809,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1728
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1732 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1736 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1740 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1744 ; 4-byte Folded Spill
@@ -925,7 +816,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1744
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1748 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1752 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1756 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1760 ; 4-byte Folded Spill
@@ -933,7 +823,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1760
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1764 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1768 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1772 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1776 ; 4-byte Folded Spill
@@ -941,7 +830,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1776
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1780 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1784 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1788 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1792 ; 4-byte Folded Spill
@@ -949,7 +837,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1792
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1796 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1800 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1804 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1808 ; 4-byte Folded Spill
@@ -957,7 +844,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1808
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1812 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1816 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1820 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1824 ; 4-byte Folded Spill
@@ -965,7 +851,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1824
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1828 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1832 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1836 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1840 ; 4-byte Folded Spill
@@ -973,7 +858,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1840
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1844 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1848 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1852 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1856 ; 4-byte Folded Spill
@@ -981,7 +865,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1856
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1860 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1864 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1868 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1872 ; 4-byte Folded Spill
@@ -989,7 +872,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1872
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1876 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1880 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1884 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1888 ; 4-byte Folded Spill
@@ -997,7 +879,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1888
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1892 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1896 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1900 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1904 ; 4-byte Folded Spill
@@ -1005,7 +886,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1904
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1908 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1912 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1916 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1920 ; 4-byte Folded Spill
@@ -1013,7 +893,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1920
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1924 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1928 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1932 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1936 ; 4-byte Folded Spill
@@ -1021,7 +900,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1936
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1940 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1944 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1948 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1952 ; 4-byte Folded Spill
@@ -1029,7 +907,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1952
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1956 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1960 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1964 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1968 ; 4-byte Folded Spill
@@ -1037,7 +914,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1968
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1972 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1976 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1980 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:1984 ; 4-byte Folded Spill
@@ -1045,7 +921,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:1984
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:1988 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:1992 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:1996 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2000 ; 4-byte Folded Spill
@@ -1053,7 +928,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2000
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2004 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2008 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2012 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2016 ; 4-byte Folded Spill
@@ -1061,7 +935,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2016
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2020 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2024 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2028 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2032 ; 4-byte Folded Spill
@@ -1069,7 +942,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2032
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2036 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2040 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2044 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2048 ; 4-byte Folded Spill
@@ -1077,7 +949,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2048
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2052 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2056 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2060 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2064 ; 4-byte Folded Spill
@@ -1085,7 +956,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2064
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2068 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2072 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2076 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2080 ; 4-byte Folded Spill
@@ -1093,7 +963,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2080
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2084 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2088 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2092 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2096 ; 4-byte Folded Spill
@@ -1101,7 +970,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2096
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2100 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2104 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2108 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2112 ; 4-byte Folded Spill
@@ -1109,7 +977,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2112
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2116 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2120 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2124 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2128 ; 4-byte Folded Spill
@@ -1117,7 +984,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2128
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2132 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2136 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2140 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2144 ; 4-byte Folded Spill
@@ -1125,7 +991,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2144
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2148 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2152 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2156 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2160 ; 4-byte Folded Spill
@@ -1133,7 +998,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2160
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2164 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2168 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2172 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2176 ; 4-byte Folded Spill
@@ -1141,7 +1005,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2176
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2180 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2184 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2188 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2192 ; 4-byte Folded Spill
@@ -1149,7 +1012,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2192
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2196 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2200 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2204 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2208 ; 4-byte Folded Spill
@@ -1157,7 +1019,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2208
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2212 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2216 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2220 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2224 ; 4-byte Folded Spill
@@ -1165,7 +1026,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2224
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2228 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2232 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2236 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2240 ; 4-byte Folded Spill
@@ -1173,7 +1033,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2240
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2244 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2248 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2252 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2256 ; 4-byte Folded Spill
@@ -1181,7 +1040,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2256
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2260 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2264 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2268 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2272 ; 4-byte Folded Spill
@@ -1189,7 +1047,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2272
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2276 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2280 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2284 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2288 ; 4-byte Folded Spill
@@ -1197,7 +1054,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2288
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2292 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2296 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2300 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2304 ; 4-byte Folded Spill
@@ -1205,7 +1061,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2304
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2308 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2312 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2316 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2320 ; 4-byte Folded Spill
@@ -1213,7 +1068,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2320
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2324 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2328 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2332 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2336 ; 4-byte Folded Spill
@@ -1221,7 +1075,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2336
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2340 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2344 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2348 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2352 ; 4-byte Folded Spill
@@ -1229,7 +1082,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2352
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2356 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2360 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2364 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2368 ; 4-byte Folded Spill
@@ -1237,7 +1089,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2368
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2372 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2376 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2380 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2384 ; 4-byte Folded Spill
@@ -1245,7 +1096,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2384
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2388 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2392 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2396 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2400 ; 4-byte Folded Spill
@@ -1253,7 +1103,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2400
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2404 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2408 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2412 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2416 ; 4-byte Folded Spill
@@ -1261,7 +1110,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2416
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2420 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2424 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2428 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2432 ; 4-byte Folded Spill
@@ -1269,7 +1117,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2432
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2436 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2440 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2444 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2448 ; 4-byte Folded Spill
@@ -1277,7 +1124,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2448
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2452 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2456 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2460 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2464 ; 4-byte Folded Spill
@@ -1285,7 +1131,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2464
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2468 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2472 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2476 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2480 ; 4-byte Folded Spill
@@ -1293,7 +1138,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2480
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2484 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2488 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2492 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2496 ; 4-byte Folded Spill
@@ -1301,7 +1145,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2496
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2500 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2504 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2508 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2512 ; 4-byte Folded Spill
@@ -1309,7 +1152,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2512
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2516 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2520 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2524 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2528 ; 4-byte Folded Spill
@@ -1317,7 +1159,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2528
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2532 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2536 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2540 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2544 ; 4-byte Folded Spill
@@ -1325,7 +1166,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2544
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT:
buffer_store_dword v0, off, s[40:43], 0 offset:2548 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2552 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2556 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2560 ; 4-byte Folded Spill @@ -1333,7 +1173,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2560 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2564 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2568 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2572 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2576 ; 4-byte Folded Spill @@ -1341,7 +1180,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2576 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2580 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2584 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2588 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2592 ; 4-byte Folded Spill @@ -1349,7 +1187,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2592 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2596 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2600 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2604 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2608 ; 4-byte Folded Spill @@ -1357,7 +1194,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2608 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2612 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2616 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2620 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2624 ; 4-byte Folded Spill @@ -1365,7 +1201,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2624 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2628 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2632 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2636 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2640 ; 4-byte Folded Spill @@ -1373,7 +1208,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2640 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
buffer_store_dword v0, off, s[40:43], 0 offset:2644 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2648 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2652 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2656 ; 4-byte Folded Spill @@ -1381,7 +1215,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2656 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2660 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2664 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2668 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2672 ; 4-byte Folded Spill @@ -1389,7 +1222,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2672 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2676 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2680 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2684 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2688 ; 4-byte Folded Spill @@ -1397,7 +1229,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2688 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2692 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2696 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2700 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2704 ; 4-byte Folded Spill @@ -1405,7 +1236,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2704 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2708 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2712 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2716 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2720 ; 4-byte Folded Spill @@ -1413,7 +1243,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2720 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2724 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2728 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2732 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2736 ; 4-byte Folded Spill @@ -1421,7 +1250,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2736 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
buffer_store_dword v0, off, s[40:43], 0 offset:2740 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2744 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2748 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2752 ; 4-byte Folded Spill @@ -1429,7 +1257,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2752 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2756 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2760 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2764 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2768 ; 4-byte Folded Spill @@ -1437,7 +1264,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2768 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2772 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2776 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2780 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2784 ; 4-byte Folded Spill @@ -1445,7 +1271,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2784 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2788 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2792 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2796 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2800 ; 4-byte Folded Spill @@ -1453,7 +1278,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2800 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2804 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2808 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2812 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2816 ; 4-byte Folded Spill @@ -1461,7 +1285,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2816 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2820 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2824 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2828 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2832 ; 4-byte Folded Spill @@ -1469,7 +1292,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2832 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
buffer_store_dword v0, off, s[40:43], 0 offset:2836 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2840 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2844 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2848 ; 4-byte Folded Spill @@ -1477,7 +1299,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2848 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2852 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2856 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2860 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2864 ; 4-byte Folded Spill @@ -1485,7 +1306,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2864 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2868 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2872 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2876 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2880 ; 4-byte Folded Spill @@ -1493,7 +1313,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2880 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2884 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2888 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2892 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2896 ; 4-byte Folded Spill @@ -1501,7 +1320,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2896 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2900 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2904 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2908 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2912 ; 4-byte Folded Spill @@ -1509,7 +1327,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2912 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2916 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2920 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2924 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2928 ; 4-byte Folded Spill @@ -1517,7 +1334,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2928 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
buffer_store_dword v0, off, s[40:43], 0 offset:2932 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2936 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2940 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2944 ; 4-byte Folded Spill @@ -1525,7 +1341,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2944 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2948 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2952 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2956 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2960 ; 4-byte Folded Spill @@ -1533,7 +1348,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2960 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2964 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2968 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2972 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2976 ; 4-byte Folded Spill @@ -1541,7 +1355,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2976 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2980 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:2984 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:2988 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:2992 ; 4-byte Folded Spill @@ -1549,7 +1362,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:2992 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:2996 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3000 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3004 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3008 ; 4-byte Folded Spill @@ -1557,7 +1369,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3008 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3012 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3016 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3020 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3024 ; 4-byte Folded Spill @@ -1565,7 +1376,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3024 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
buffer_store_dword v0, off, s[40:43], 0 offset:3028 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3032 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3036 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3040 ; 4-byte Folded Spill @@ -1573,7 +1383,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3040 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3044 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3048 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3052 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3056 ; 4-byte Folded Spill @@ -1581,7 +1390,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3056 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3060 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3064 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3068 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3072 ; 4-byte Folded Spill @@ -1589,7 +1397,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3072 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3076 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3080 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3084 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3088 ; 4-byte Folded Spill @@ -1597,7 +1404,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3088 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3092 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3096 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3100 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3104 ; 4-byte Folded Spill @@ -1605,7 +1411,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3104 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3108 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3112 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3116 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3120 ; 4-byte Folded Spill @@ -1613,7 +1418,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3120 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
buffer_store_dword v0, off, s[40:43], 0 offset:3124 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3128 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3132 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3136 ; 4-byte Folded Spill @@ -1621,7 +1425,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3136 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3140 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3144 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3148 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3152 ; 4-byte Folded Spill @@ -1629,7 +1432,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3152 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3156 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3160 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3164 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3168 ; 4-byte Folded Spill @@ -1637,7 +1439,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3168 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3172 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3176 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3180 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3184 ; 4-byte Folded Spill @@ -1645,7 +1446,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3184 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3188 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3192 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3196 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3200 ; 4-byte Folded Spill @@ -1653,7 +1453,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3200 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3204 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3208 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3212 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3216 ; 4-byte Folded Spill @@ -1661,7 +1460,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3216 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
buffer_store_dword v0, off, s[40:43], 0 offset:3220 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3224 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3228 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3232 ; 4-byte Folded Spill @@ -1669,7 +1467,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3232 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3236 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3240 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3244 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3248 ; 4-byte Folded Spill @@ -1677,7 +1474,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3248 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3252 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3256 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3260 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3264 ; 4-byte Folded Spill @@ -1685,7 +1481,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3264 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3268 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3272 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3276 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3280 ; 4-byte Folded Spill @@ -1693,7 +1488,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3280 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3284 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3288 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3292 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3296 ; 4-byte Folded Spill @@ -1701,7 +1495,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3296 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3300 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3304 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3308 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3312 ; 4-byte Folded Spill @@ -1709,7 +1502,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3312 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
buffer_store_dword v0, off, s[40:43], 0 offset:3316 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3320 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3324 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3328 ; 4-byte Folded Spill @@ -1717,7 +1509,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3328 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3332 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3336 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3340 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3344 ; 4-byte Folded Spill @@ -1725,7 +1516,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3344 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3348 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3352 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3356 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3360 ; 4-byte Folded Spill @@ -1733,7 +1523,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3360 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3364 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3368 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3372 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3376 ; 4-byte Folded Spill @@ -1741,7 +1530,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3376 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3380 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3384 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3388 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3392 ; 4-byte Folded Spill @@ -1749,7 +1537,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3392 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3396 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3400 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3404 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3408 ; 4-byte Folded Spill @@ -1757,7 +1544,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3408 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
buffer_store_dword v0, off, s[40:43], 0 offset:3412 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3416 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3420 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3424 ; 4-byte Folded Spill @@ -1765,7 +1551,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3424 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3428 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3432 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3436 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3440 ; 4-byte Folded Spill @@ -1773,7 +1558,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3440 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3444 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3448 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3452 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3456 ; 4-byte Folded Spill @@ -1781,7 +1565,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3456 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3460 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3464 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3468 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3472 ; 4-byte Folded Spill @@ -1789,7 +1572,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3472 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3476 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3480 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3484 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3488 ; 4-byte Folded Spill @@ -1797,7 +1579,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3488 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3492 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3496 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3500 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3504 ; 4-byte Folded Spill @@ -1805,7 +1586,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3504 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
buffer_store_dword v0, off, s[40:43], 0 offset:3508 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3512 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3516 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3520 ; 4-byte Folded Spill @@ -1813,7 +1593,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3520 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3524 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3528 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3532 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3536 ; 4-byte Folded Spill @@ -1821,7 +1600,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3536 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3540 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3544 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3548 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3552 ; 4-byte Folded Spill @@ -1829,7 +1607,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3552 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3556 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3560 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3564 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3568 ; 4-byte Folded Spill @@ -1837,7 +1614,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3568 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3572 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3576 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3580 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3584 ; 4-byte Folded Spill @@ -1845,7 +1621,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3584 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3588 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3592 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3596 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3600 ; 4-byte Folded Spill @@ -1853,7 +1628,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3600 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
buffer_store_dword v0, off, s[40:43], 0 offset:3604 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3608 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3612 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3616 ; 4-byte Folded Spill @@ -1861,7 +1635,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3616 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3620 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3624 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3628 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3632 ; 4-byte Folded Spill @@ -1869,7 +1642,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3632 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3636 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3640 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3644 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3648 ; 4-byte Folded Spill @@ -1877,7 +1649,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3648 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3652 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3656 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3660 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3664 ; 4-byte Folded Spill @@ -1885,7 +1656,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3664 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3668 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3672 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3676 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3680 ; 4-byte Folded Spill @@ -1893,7 +1663,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3680 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3684 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3688 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3692 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3696 ; 4-byte Folded Spill @@ -1901,7 +1670,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3696 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
buffer_store_dword v0, off, s[40:43], 0 offset:3700 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3704 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3708 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3712 ; 4-byte Folded Spill @@ -1909,7 +1677,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3712 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3716 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3720 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3724 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3728 ; 4-byte Folded Spill @@ -1917,7 +1684,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3728 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3732 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3736 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3740 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3744 ; 4-byte Folded Spill @@ -1925,7 +1691,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3744 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3748 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3752 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3756 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3760 ; 4-byte Folded Spill @@ -1933,7 +1698,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3760 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3764 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3768 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3772 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3776 ; 4-byte Folded Spill @@ -1941,7 +1705,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3776 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3780 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3784 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3788 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3792 ; 4-byte Folded Spill @@ -1949,7 +1712,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3792 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
buffer_store_dword v0, off, s[40:43], 0 offset:3796 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3800 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3804 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3808 ; 4-byte Folded Spill @@ -1957,7 +1719,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3808 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3812 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3816 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3820 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3824 ; 4-byte Folded Spill @@ -1965,7 +1726,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3824 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3828 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3832 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3836 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3840 ; 4-byte Folded Spill @@ -1973,7 +1733,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3840 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3844 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3848 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3852 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3856 ; 4-byte Folded Spill @@ -1981,7 +1740,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3856 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3860 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3864 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3868 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3872 ; 4-byte Folded Spill @@ -1989,7 +1747,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3872 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3876 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3880 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3884 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3888 ; 4-byte Folded Spill @@ -1997,7 +1754,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3888 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
buffer_store_dword v0, off, s[40:43], 0 offset:3892 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3896 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3900 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3904 ; 4-byte Folded Spill @@ -2005,7 +1761,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3904 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3908 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3912 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3916 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3920 ; 4-byte Folded Spill @@ -2013,7 +1768,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3920 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3924 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3928 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3932 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3936 ; 4-byte Folded Spill @@ -2021,7 +1775,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3936 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3940 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3944 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3948 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3952 ; 4-byte Folded Spill @@ -2029,7 +1782,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3952 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3956 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3960 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3964 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3968 ; 4-byte Folded Spill @@ -2037,7 +1789,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3968 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3972 ; 4-byte Folded Spill -; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3976 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3980 ; 4-byte Folded Spill ; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:3984 ; 4-byte Folded Spill @@ -2045,7 +1796,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:3984 ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: 
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:3988 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:3992 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:3996 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:4000 ; 4-byte Folded Spill
@@ -2053,7 +1803,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:4000
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:4004 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4008 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4012 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:4016 ; 4-byte Folded Spill
@@ -2061,7 +1810,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:4016
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:4020 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4024 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4028 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:4032 ; 4-byte Folded Spill
@@ -2069,7 +1817,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:4032
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:4036 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4040 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4044 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:4048 ; 4-byte Folded Spill
@@ -2077,7 +1824,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:4048
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:4052 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4056 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4060 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:4064 ; 4-byte Folded Spill
@@ -2085,7 +1831,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: buffer_load_dwordx4 v[0:3], v[5:6], s[4:7], 0 addr64 offset:4064
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:4068 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4072 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4076 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:4080 ; 4-byte Folded Spill
@@ -2094,7 +1839,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b64 s[4:5], 0x80
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2103,7 +1847,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x40100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2112,7 +1855,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x40500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2121,7 +1863,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x40900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2130,7 +1871,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x40d00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2139,7 +1879,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x41100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2148,7 +1887,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x41500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2157,7 +1895,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x41900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2166,7 +1903,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x41d00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2175,7 +1911,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x42100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2184,7 +1919,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x42500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2193,7 +1927,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x42900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2202,7 +1935,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x42d00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2211,7 +1943,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x43100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2220,7 +1951,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x43500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2229,7 +1959,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x43900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2238,7 +1967,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x43d00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2247,7 +1975,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x44100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2256,7 +1983,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x44500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2265,7 +1991,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x44900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2274,7 +1999,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x44d00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2283,7 +2007,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x45100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2292,7 +2015,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x45500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2301,7 +2023,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x45900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2310,7 +2031,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x45d00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2319,7 +2039,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x46100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2328,7 +2047,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x46500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2337,7 +2055,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x46900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2346,7 +2063,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x46d00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2355,7 +2071,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x47100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2364,7 +2079,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x47500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2373,7 +2087,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x47900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2382,7 +2095,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x47d00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2391,7 +2103,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x48100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2400,7 +2111,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x48500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2409,7 +2119,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x48900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2418,7 +2127,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x48d00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2427,7 +2135,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x49100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2436,7 +2143,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x49500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2445,7 +2151,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x49900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2454,7 +2159,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x49d00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2463,7 +2167,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4a100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2472,7 +2175,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4a500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2481,7 +2183,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4a900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2490,7 +2191,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4ad00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2499,7 +2199,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4b100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2508,7 +2207,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4b500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2517,7 +2215,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4b900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2526,7 +2223,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4bd00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2535,7 +2231,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4c100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2544,7 +2239,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4c500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2553,7 +2247,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4c900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2562,7 +2255,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4cd00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2571,7 +2263,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4d100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2580,7 +2271,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4d500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2589,7 +2279,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4d900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2598,7 +2287,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4dd00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2607,7 +2295,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4e100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2616,7 +2303,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4e500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2625,7 +2311,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4e900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2634,7 +2319,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4ed00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2643,7 +2327,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4f100
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2652,7 +2335,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4f500
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -2661,7 +2343,6 @@ define amdgpu_kernel void @test(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GFX6-NEXT: s_mov_b32 s2, 0x4f900
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10102,7 +9783,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: s_mov_b64 s[8:9], exec
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10111,7 +9791,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: s_mov_b32 s2, 0x84000
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10120,7 +9799,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: s_mov_b32 s2, 0x83c00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10129,7 +9807,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: s_mov_b32 s2, 0x83800
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10138,7 +9815,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: s_mov_b32 s2, 0x83400
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10147,7 +9823,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: s_mov_b32 s2, 0x83000
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10156,7 +9831,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: s_mov_b32 s2, 0x82c00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10165,7 +9839,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: s_mov_b32 s2, 0x82800
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10174,7 +9847,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: s_mov_b32 s2, 0x82400
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10183,7 +9855,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: s_mov_b32 s2, 0x82000
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10192,7 +9863,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: s_mov_b32 s2, 0x81c00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10201,7 +9871,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: s_mov_b32 s2, 0x81400
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v0, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v1, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v2, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v3, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10211,7 +9880,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: s_mov_b32 s2, 0x80c00
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10220,7 +9888,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: s_mov_b32 s2, 0x81000
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s2 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s2 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s2 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s2 offset:12 ; 4-byte Folded Spill
@@ -10244,7 +9911,6 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX6-NEXT: v_add_i32_e32 v4, vcc, 16, v4
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v7, off, s[40:43], s0 ; 4-byte Folded Spill
-; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_store_dword v8, off, s[40:43], s0 offset:4 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v9, off, s[40:43], s0 offset:8 ; 4-byte Folded Spill
; GFX6-NEXT: buffer_store_dword v10, off, s[40:43], s0 offset:12 ; 4-byte Folded Spill
diff --git a/llvm/test/CodeGen/AMDGPU/spill-wait.mir b/llvm/test/CodeGen/AMDGPU/spill-wait.mir
new file mode 100644
index 0000000..8e89625
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/spill-wait.mir
@@ -0,0 +1,89 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -run-pass si-insert-waitcnts %s -o - | FileCheck -check-prefix=GCN %s
+
+# There shall be no S_WAITCNT between two stores.
+
+---
+name: spill_vgpr_tuple
+
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $sgpr76_sgpr77_sgpr78_sgpr79
+
+    ; GCN-LABEL: name: spill_vgpr_tuple
+    ; GCN: liveins: $vgpr0_vgpr1, $sgpr76_sgpr77_sgpr78_sgpr79
+    ; GCN-NEXT: {{ $}}
+    ; GCN-NEXT: S_WAITCNT 0
+    ; GCN-NEXT: $vgpr64_vgpr65 = V_MOV_B64_e32 $vgpr0_vgpr1, implicit $exec
+    ; GCN-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr64, $sgpr76_sgpr77_sgpr78_sgpr79, 0, 672, 0, 0, implicit $exec, implicit-def $vgpr64_vgpr65, implicit $vgpr64_vgpr65
+    ; GCN-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr65, $sgpr76_sgpr77_sgpr78_sgpr79, 0, 676, 0, 0, implicit $exec, implicit $vgpr64_vgpr65
+    ; GCN-NEXT: S_ENDPGM 0
+    $vgpr64_vgpr65 = V_MOV_B64_e32 $vgpr0_vgpr1, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr64, $sgpr76_sgpr77_sgpr78_sgpr79, 0, 672, 0, 0, implicit $exec, implicit-def $vgpr64_vgpr65, implicit $vgpr64_vgpr65
+    BUFFER_STORE_DWORD_OFFSET $vgpr65, $sgpr76_sgpr77_sgpr78_sgpr79, 0, 676, 0, 0, implicit $exec, implicit $vgpr64_vgpr65
+    S_ENDPGM 0
+...
+
+# Make sure that while ignoring implicit operands we will not ignore implicit $vcc on VALU
+
+---
+name: load_vcc_wait
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $sgpr10_sgpr11
+
+    ; GCN-LABEL: name: load_vcc_wait
+    ; GCN: liveins: $vgpr0, $sgpr10_sgpr11
+    ; GCN-NEXT: {{ $}}
+    ; GCN-NEXT: S_WAITCNT 0
+    ; GCN-NEXT: $vcc_lo = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: S_WAITCNT 49279
+    ; GCN-NEXT: $vgpr1 = V_ADDC_U32_e32 0, $vgpr0, implicit-def $vcc, implicit $vcc, implicit $exec
+    ; GCN-NEXT: S_ENDPGM 0
+    $vcc_lo = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $vgpr1 = V_ADDC_U32_e32 0, $vgpr0, implicit-def $vcc, implicit $vcc, implicit $exec
+    S_ENDPGM 0
+...
+
+# Make sure that while ignoring implicit operands we will not ignore implicit $flat_src on FLAT
+
+---
+name: load_flat_scr_lo_flat_load_wait
+
+body: |
+  bb.0:
+    liveins: $sgpr10_sgpr11, $vgpr0_vgpr1
+
+    ; GCN-LABEL: name: load_flat_scr_lo_flat_load_wait
+    ; GCN: liveins: $sgpr10_sgpr11, $vgpr0_vgpr1
+    ; GCN-NEXT: {{ $}}
+    ; GCN-NEXT: S_WAITCNT 0
+    ; GCN-NEXT: $flat_scr_lo = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: S_WAITCNT 49279
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: S_ENDPGM 0
+    $flat_scr_lo = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+    S_ENDPGM 0
+...
+
+---
+name: load_flat_scr_lo_scratch_store_wait
+
+body: |
+  bb.0:
+    liveins: $sgpr10_sgpr11, $vgpr0, $sgpr32
+
+    ; GCN-LABEL: name: load_flat_scr_lo_scratch_store_wait
+    ; GCN: liveins: $sgpr10_sgpr11, $vgpr0, $sgpr32
+    ; GCN-NEXT: {{ $}}
+    ; GCN-NEXT: S_WAITCNT 0
+    ; GCN-NEXT: $flat_scr_hi = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: S_WAITCNT 49279
+    ; GCN-NEXT: SCRATCH_STORE_DWORD_SADDR $vgpr0, $sgpr32, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: S_ENDPGM 0
+    $flat_scr_hi = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    SCRATCH_STORE_DWORD_SADDR $vgpr0, $sgpr32, 0, 0, implicit $exec, implicit $flat_scr
+    S_ENDPGM 0
+...
diff --git a/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll b/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll
index 33c06e5..2d5e5a9 100644
--- a/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll
+++ b/llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll
@@ -255,7 +255,7 @@ define amdgpu_kernel void @v256i8_liveout(ptr addrspace(1) %src1, ptr addrspace(
; GFX906-NEXT: v_mov_b32_e32 v4, 0
; GFX906-NEXT: s_waitcnt vmcnt(0)
; GFX906-NEXT: buffer_store_dword v5, off, s[12:15], 0 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: s_nop 0
; GFX906-NEXT: buffer_store_dword v6, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GFX906-NEXT: buffer_store_dword v7, off, s[12:15], 0 offset:8 ; 4-byte Folded Spill
; GFX906-NEXT: buffer_store_dword v8, off, s[12:15], 0 offset:12 ; 4-byte Folded Spill
@@ -281,7 +281,7 @@ define amdgpu_kernel void @v256i8_liveout(ptr addrspace(1) %src1, ptr addrspace(
; GFX906-NEXT: global_load_dwordx4 v[0:3], v61, s[6:7] offset:240
; GFX906-NEXT: s_waitcnt vmcnt(0)
; GFX906-NEXT: buffer_store_dword v0, off, s[12:15], 0 ; 4-byte Folded Spill
-; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: s_nop 0
; GFX906-NEXT: buffer_store_dword v1, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GFX906-NEXT: buffer_store_dword v2, off, s[12:15], 0 offset:8 ; 4-byte Folded Spill
; GFX906-NEXT: buffer_store_dword v3, off, s[12:15], 0 offset:12 ; 4-byte Folded Spill
diff --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
index b35ef64..c295a05 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
@@ -167,7 +167,7 @@ define amdgpu_gfx void @strict_wwm_cfg(ptr addrspace(8) inreg %tmp14, i32 %arg)
; GFX9-O0-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], s34
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; GFX9-O0-NEXT: ; implicit-def: $sgpr36_sgpr37
; GFX9-O0-NEXT: ; implicit-def: $sgpr36_sgpr37
@@ -548,13 +548,13 @@ define amdgpu_gfx void @strict_wwm_call_i64(ptr addrspace(8) inreg %tmp14, i64 i
; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s33 offset:16 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill
@@ -686,7 +686,7 @@ define amdgpu_gfx void @strict_wwm_call_i64(ptr addrspace(8) inreg %tmp14, i64 i
; GFX9-O3-NEXT: buffer_store_dword v2, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill
; GFX9-O3-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:16 ; 4-byte Folded Spill
; GFX9-O3-NEXT: buffer_store_dword v2, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill
-; GFX9-O3-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT: s_nop 0
; GFX9-O3-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill
; GFX9-O3-NEXT: s_mov_b64 exec, s[34:35]
; GFX9-O3-NEXT: v_writelane_b32 v8, s30, 0
@@ -747,7 +747,7 @@ define amdgpu_gfx void @strict_wwm_amdgpu_cs_main(<4 x i32> inreg %desc, i32 %in
; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_mov_b64 exec, s[34:35]
; GFX9-O0-NEXT: s_mov_b32 s36, s4
@@ -845,13 +845,13 @@ define amdgpu_gfx void @strict_wwm_amdgpu_cs_main(<4 x i32> inreg %desc, i32 %in
; GFX9-O3-NEXT: s_xor_saveexec_b64 s[34:35], -1
; GFX9-O3-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-O3-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
-; GFX9-O3-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT: s_nop 0
; GFX9-O3-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-O3-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-O3-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT: s_nop 0
; GFX9-O3-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; GFX9-O3-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; GFX9-O3-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT: s_nop 0
; GFX9-O3-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; GFX9-O3-NEXT: s_mov_b64 exec, s[34:35]
; GFX9-O3-NEXT: v_lshlrev_b32_e32 v0, 5, v0
@@ -921,7 +921,7 @@ define amdgpu_gfx <32 x i32> @strict_wwm_callee_saves(<32 x i32> inreg %keep, pt
; GFX9-O0-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
; GFX9-O0-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_mov_b64 exec, -1
; GFX9-O0-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -1286,7 +1286,7 @@ define amdgpu_gfx <32 x i32> @strict_wwm_callee_saves(<32 x i32> inreg %keep, pt
; GFX9-O3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-O3-NEXT: s_xor_saveexec_b64 s[34:35], -1
; GFX9-O3-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O3-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT: s_nop 0
; GFX9-O3-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; GFX9-O3-NEXT: s_mov_b64 exec, s[34:35]
; GFX9-O3-NEXT: buffer_load_dword v26, off, s[0:3], s32
diff --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
index 7fecab0..ee91748 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
@@ -144,7 +144,7 @@ define amdgpu_cs void @cfg(ptr addrspace(8) inreg %tmp14, i32 %arg) {
; GFX9-O0-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], s0
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: buffer_store_dword v4, off, s[16:19], 0 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
; GFX9-O0-NEXT: buffer_store_dword v5, off, s[16:19], 0 offset:16 ; 4-byte Folded Spill
; GFX9-O0-NEXT: ; implicit-def: $sgpr2_sgpr3
; GFX9-O0-NEXT: ; implicit-def: $sgpr2_sgpr3
@@ -1037,7 +1037,7 @@ define amdgpu_cs void @strict_wwm_cfg(ptr addrspace(8) inreg %tmp14, i32 %arg) {
; GFX9-O0-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], s0
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: buffer_store_dword v4, off, s[16:19], 0 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_nop 0
; GFX9-O0-NEXT: buffer_store_dword v5, off, s[16:19], 0 offset:16 ; 4-byte Folded Spill
; GFX9-O0-NEXT: ; implicit-def: $sgpr2_sgpr3
; GFX9-O0-NEXT: ; implicit-def: $sgpr2_sgpr3
diff --git a/llvm/test/CodeGen/LoongArch/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/LoongArch/ctlz-cttz-ctpop.ll
index 3efdd08..a261027 100644
--- a/llvm/test/CodeGen/LoongArch/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/LoongArch/ctlz-cttz-ctpop.ll
@@ -89,18 +89,14 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
define i8 @test_not_ctlz_i8(i8 %a) nounwind {
; LA32-LABEL: test_not_ctlz_i8:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a1, $zero, 255
-; LA32-NEXT: andn $a0, $a1, $a0
-; LA32-NEXT: clz.w $a0, $a0
-; LA32-NEXT: addi.w $a0, $a0, -24
+; LA32-NEXT: slli.w $a0, $a0, 24
+; LA32-NEXT: clo.w $a0, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: test_not_ctlz_i8:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a1, $zero, 255
-; LA64-NEXT: andn $a0, $a1, $a0
-; LA64-NEXT: clz.d $a0, $a0
-; LA64-NEXT: addi.d $a0, $a0, -56
+; LA64-NEXT: slli.d $a0, $a0, 56
+; LA64-NEXT: clo.d $a0, $a0
; LA64-NEXT: ret
%neg = xor i8 %a, -1
%tmp = call i8 @llvm.ctlz.i8(i8 %neg, i1 false)
@@ -110,18 +106,14 @@ define i8 @test_not_ctlz_i8(i8 %a) nounwind {
define i16 @test_not_ctlz_i16(i16 %a) nounwind {
; LA32-LABEL: test_not_ctlz_i16:
; LA32: # %bb.0:
-; LA32-NEXT: nor $a0, $a0, $zero
-; LA32-NEXT: bstrpick.w $a0, $a0, 15, 0
-; LA32-NEXT: clz.w $a0, $a0
-; LA32-NEXT: addi.w $a0, $a0, -16
+; LA32-NEXT: slli.w $a0, $a0, 16
+; LA32-NEXT: clo.w $a0, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: test_not_ctlz_i16:
; LA64: # %bb.0:
-; LA64-NEXT: nor $a0, $a0, $zero
-; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
-; LA64-NEXT: clz.d $a0, $a0
-; LA64-NEXT: addi.d $a0, $a0, -48
+; LA64-NEXT: slli.d $a0, $a0, 48
+; LA64-NEXT: clo.d $a0, $a0
; LA64-NEXT: ret
%neg = xor i16 %a, -1
%tmp = call i16 @llvm.ctlz.i16(i16 %neg, i1 false)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/splat_vector.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/splat_vector.ll
new file mode 100644
index 0000000..6a1c3ca
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/splat_vector.ll
@@ -0,0 +1,821 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+define <vscale x 1 x i1> @splat_zero_nxv1i1() {
+  ; RV32-LABEL: name: splat_zero_nxv1i1
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[C]](s1)
COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv1i1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ ret <vscale x 1 x i1> zeroinitializer
+}
+
+define <vscale x 2 x i1> @splat_zero_nxv2i1() {
+ ; RV32-LABEL: name: splat_zero_nxv2i1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv2i1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ ret <vscale x 2 x i1> zeroinitializer
+}
+
+define <vscale x 4 x i1> @splat_zero_nxv4i1() {
+ ; RV32-LABEL: name: splat_zero_nxv4i1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv4i1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ ret <vscale x 4 x i1> zeroinitializer
+}
+
+define <vscale x 8 x i1> @splat_zero_nxv8i1() {
+ ; RV32-LABEL: name: splat_zero_nxv8i1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv8i1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ ret <vscale x 8 x i1> zeroinitializer
+}
+
+define <vscale x 16 x i1> @splat_zero_nxv16i1() {
+ ; RV32-LABEL: name: splat_zero_nxv16i1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv16i1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ ret <vscale x 16 x i1> zeroinitializer
+}
+
+define <vscale x 32 x i1> @splat_zero_nxv32i1() {
+ ; RV32-LABEL: name: splat_zero_nxv32i1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ;
RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR [[C]](s1) + ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s1>) + ; RV32-NEXT: PseudoRET implicit $v0 + ; + ; RV64-LABEL: name: splat_zero_nxv32i1 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR [[C]](s1) + ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s1>) + ; RV64-NEXT: PseudoRET implicit $v0 + ret <vscale x 32 x i1> zeroinitializer +} + +define <vscale x 64 x i1> @splat_zero_nxv64i1() { + ; RV32-LABEL: name: splat_zero_nxv64i1 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR [[C]](s1) + ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s1>) + ; RV32-NEXT: PseudoRET implicit $v0 + ; + ; RV64-LABEL: name: splat_zero_nxv64i1 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR [[C]](s1) + ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s1>) + ; RV64-NEXT: PseudoRET implicit $v0 + ret <vscale x 64 x i1> zeroinitializer +} + +define <vscale x 1 x i8> @splat_zero_nxv1i8() { + ; RV32-LABEL: name: splat_zero_nxv1i8 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s8) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv1i8 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s8) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 1 x i8> zeroinitializer +} + +define <vscale x 2 x i8> @splat_zero_nxv2i8() { + ; RV32-LABEL: name: splat_zero_nxv2i8 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s8) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv2i8 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s8) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 2 x i8> zeroinitializer +} + +define <vscale x 4 x i8> @splat_zero_nxv4i8() { + ; RV32-LABEL: name: splat_zero_nxv4i8 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s8) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv4i8 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s8) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 4 x i8> zeroinitializer +} + +define <vscale x 8 x i8> @splat_zero_nxv8i8() { + ; RV32-LABEL: name: splat_zero_nxv8i8 + ; RV32: bb.1 
(%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s8) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv8i8 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s8) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 8 x i8> zeroinitializer +} + +define <vscale x 16 x i8> @splat_zero_nxv16i8() { + ; RV32-LABEL: name: splat_zero_nxv16i8 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s8) + ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: splat_zero_nxv16i8 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s8) + ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + ret <vscale x 16 x i8> zeroinitializer +} + +define <vscale x 32 x i8> @splat_zero_nxv32i8() { + ; RV32-LABEL: name: splat_zero_nxv32i8 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s8) + ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s8>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: splat_zero_nxv32i8 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s8) + ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s8>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + ret <vscale x 32 x i8> zeroinitializer +} + +define <vscale x 64 x i8> @splat_zero_nxv64i8() { + ; RV32-LABEL: name: splat_zero_nxv64i8 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s8) + ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s8>) + ; RV32-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64-LABEL: name: splat_zero_nxv64i8 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s8) + ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s8>) + ; RV64-NEXT: PseudoRET implicit $v8m8 + ret <vscale x 64 x i8> zeroinitializer +} + +define <vscale x 1 x i16> @splat_zero_nxv1i16() { + ; RV32-LABEL: name: splat_zero_nxv1i16 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv1i16 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 1 x i16> 
zeroinitializer +} + +define <vscale x 2 x i16> @splat_zero_nxv2i16() { + ; RV32-LABEL: name: splat_zero_nxv2i16 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv2i16 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 2 x i16> zeroinitializer +} + +define <vscale x 4 x i16> @splat_zero_nxv4i16() { + ; RV32-LABEL: name: splat_zero_nxv4i16 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv4i16 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 4 x i16> zeroinitializer +} + +define <vscale x 8 x i16> @splat_zero_nxv8i16() { + ; RV32-LABEL: name: splat_zero_nxv8i16 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: splat_zero_nxv8i16 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + ret <vscale x 8 x i16> zeroinitializer +} + +define <vscale x 16 x i16> @splat_zero_nxv16i16() { + ; RV32-LABEL: name: splat_zero_nxv16i16 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: splat_zero_nxv16i16 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + ret <vscale x 16 x i16> zeroinitializer +} + +define <vscale x 32 x i16> @splat_zero_nxv32i16() { + ; RV32-LABEL: name: splat_zero_nxv32i16 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64-LABEL: name: splat_zero_nxv32i16 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x 
s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8m8 + ret <vscale x 32 x i16> zeroinitializer +} + +define <vscale x 1 x i32> @splat_zero_nxv1i32() { + ; RV32-LABEL: name: splat_zero_nxv1i32 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv1i32 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 1 x i32> zeroinitializer +} + +define <vscale x 2 x i32> @splat_zero_nxv2i32() { + ; RV32-LABEL: name: splat_zero_nxv2i32 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv2i32 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 2 x i32> zeroinitializer +} + +define <vscale x 4 x i32> @splat_zero_nxv4i32() { + ; RV32-LABEL: name: splat_zero_nxv4i32 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: splat_zero_nxv4i32 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + ret <vscale x 4 x i32> zeroinitializer +} + +define <vscale x 8 x i32> @splat_zero_nxv8i32() { + ; RV32-LABEL: name: splat_zero_nxv8i32 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: splat_zero_nxv8i32 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + ret <vscale x 8 x i32> zeroinitializer +} + +define <vscale x 16 x i32> @splat_zero_nxv16i32() { + ; RV32-LABEL: name: splat_zero_nxv16i32 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64-LABEL: name: 
splat_zero_nxv16i32 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8m8 + ret <vscale x 16 x i32> zeroinitializer +} + +define <vscale x 1 x i64> @splat_zero_nxv1i64() { + ; RV32-LABEL: name: splat_zero_nxv1i64 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv1i64 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 1 x i64> zeroinitializer +} + +define <vscale x 2 x i64> @splat_zero_nxv2i64() { + ; RV32-LABEL: name: splat_zero_nxv2i64 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: splat_zero_nxv2i64 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + ret <vscale x 2 x i64> zeroinitializer +} + +define <vscale x 4 x i64> @splat_zero_nxv4i64() { + ; RV32-LABEL: name: splat_zero_nxv4i64 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: splat_zero_nxv4i64 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + ret <vscale x 4 x i64> zeroinitializer +} + +define <vscale x 8 x i64> @splat_zero_nxv8i64() { + ; RV32-LABEL: name: splat_zero_nxv8i64 + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64-LABEL: name: splat_zero_nxv8i64 + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8m8 + ret <vscale x 8 x i64> zeroinitializer +} + +define <vscale x 1 x half> @splat_zero_nxv1half() { + ; RV32-LABEL: name: splat_zero_nxv1half + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 
x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv1half + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 1 x half> zeroinitializer +} + +define <vscale x 2 x half> @splat_zero_nxv2half() { + ; RV32-LABEL: name: splat_zero_nxv2half + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv2half + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 2 x half> zeroinitializer +} + +define <vscale x 4 x half> @splat_zero_nxv4half() { + ; RV32-LABEL: name: splat_zero_nxv4half + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv4half + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 4 x half> zeroinitializer +} + +define <vscale x 8 x half> @splat_zero_nxv8half() { + ; RV32-LABEL: name: splat_zero_nxv8half + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: splat_zero_nxv8half + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + ret <vscale x 8 x half> zeroinitializer +} + +define <vscale x 16 x half> @splat_zero_nxv16half() { + ; RV32-LABEL: name: splat_zero_nxv16half + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: splat_zero_nxv16half + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + ret <vscale x 16 x half> 
zeroinitializer +} + +define <vscale x 32 x half> @splat_zero_nxv32half() { + ; RV32-LABEL: name: splat_zero_nxv32half + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64-LABEL: name: splat_zero_nxv32half + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s16) + ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8m8 + ret <vscale x 32 x half> zeroinitializer +} + +define <vscale x 1 x float> @splat_zero_nxv1float() { + ; RV32-LABEL: name: splat_zero_nxv1float + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv1float + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 1 x float> zeroinitializer +} + +define <vscale x 2 x float> @splat_zero_nxv2float() { + ; RV32-LABEL: name: splat_zero_nxv2float + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv2float + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 2 x float> zeroinitializer +} + +define <vscale x 4 x float> @splat_zero_nxv4float() { + ; RV32-LABEL: name: splat_zero_nxv4float + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: splat_zero_nxv4float + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + ret <vscale x 4 x float> zeroinitializer +} + +define <vscale x 8 x float> @splat_zero_nxv8float() { + ; RV32-LABEL: name: splat_zero_nxv8float + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: 
splat_zero_nxv8float + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + ret <vscale x 8 x float> zeroinitializer +} + +define <vscale x 16 x float> @splat_zero_nxv16float() { + ; RV32-LABEL: name: splat_zero_nxv16float + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64-LABEL: name: splat_zero_nxv16float + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8m8 + ret <vscale x 16 x float> zeroinitializer +} + +define <vscale x 1 x double> @splat_zero_nxv1double() { + ; RV32-LABEL: name: splat_zero_nxv1double + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv1double + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 1 x double> zeroinitializer +} + +define <vscale x 2 x double> @splat_zero_nxv2double() { + ; RV32-LABEL: name: splat_zero_nxv2double + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: splat_zero_nxv2double + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + ret <vscale x 2 x double> zeroinitializer +} + +define <vscale x 4 x double> @splat_zero_nxv4double() { + ; RV32-LABEL: name: splat_zero_nxv4double + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: splat_zero_nxv4double + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + ret <vscale x 4 x double> zeroinitializer +} + +define <vscale x 8 x double> 
@splat_zero_nxv8double() { + ; RV32-LABEL: name: splat_zero_nxv8double + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64-LABEL: name: splat_zero_nxv8double + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8m8 + ret <vscale x 8 x double> zeroinitializer +} + +define <vscale x 1 x ptr> @splat_zero_nxv1ptr() { + ; RV32-LABEL: name: splat_zero_nxv1ptr + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x p0>) = G_SPLAT_VECTOR [[C]](p0) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x p0>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv1ptr + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x p0>) = G_SPLAT_VECTOR [[C]](p0) + ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x p0>) + ; RV64-NEXT: PseudoRET implicit $v8 + ret <vscale x 1 x ptr> zeroinitializer +} + +define <vscale x 2 x ptr> @splat_zero_nxv2ptr() { + ; RV32-LABEL: name: splat_zero_nxv2ptr + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x p0>) = G_SPLAT_VECTOR [[C]](p0) + ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x p0>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: splat_zero_nxv2ptr + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x p0>) = G_SPLAT_VECTOR [[C]](p0) + ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x p0>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + ret <vscale x 2 x ptr> zeroinitializer +} + +define <vscale x 4 x ptr> @splat_zero_nxv4ptr() { + ; RV32-LABEL: name: splat_zero_nxv4ptr + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x p0>) = G_SPLAT_VECTOR [[C]](p0) + ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x p0>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: splat_zero_nxv4ptr + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x p0>) = G_SPLAT_VECTOR [[C]](p0) + ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x p0>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + ret <vscale x 4 x ptr> zeroinitializer +} + +define <vscale x 8 x ptr> @splat_zero_nxv8ptr() { + ; RV32-LABEL: name: splat_zero_nxv8ptr + ; RV32: bb.1 (%ir-block.0): + ; RV32-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x p0>) = G_SPLAT_VECTOR [[C]](p0) + ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x p0>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: splat_zero_nxv8ptr + ; RV64: bb.1 (%ir-block.0): + ; RV64-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x p0>) = G_SPLAT_VECTOR [[C]](p0) + ; RV64-NEXT: 
$v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x p0>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ ret <vscale x 8 x ptr> zeroinitializer
+}
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index 0f3fdf0..6a0dbbe 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -25,7 +25,7 @@ define void @last_chance_recoloring_failure() {
 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 16 * vlenb
 ; CHECK-NEXT: li a0, 55
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8
+; CHECK-NEXT: vloxseg2ei32.v v16, (a1), v8
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add a0, sp, a0
@@ -81,7 +81,7 @@ define void @last_chance_recoloring_failure() {
 ; SUBREGLIVENESS-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 16 * vlenb
 ; SUBREGLIVENESS-NEXT: li a0, 55
 ; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; SUBREGLIVENESS-NEXT: vloxseg2ei32.v v16, (a0), v8
+; SUBREGLIVENESS-NEXT: vloxseg2ei32.v v16, (a1), v8
 ; SUBREGLIVENESS-NEXT: csrr a0, vlenb
 ; SUBREGLIVENESS-NEXT: slli a0, a0, 3
 ; SUBREGLIVENESS-NEXT: add a0, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index dec6772..d613e4e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -1,47 +1,93 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
 declare <vscale x 1 x half> @llvm.vp.ceil.nxv1f16(<vscale x 1 x half>, <vscale x 1 x i1>, i32)
 define <vscale x 1 x half> @vp_ceil_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_vv_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vp_ceil_vv_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a1, %hi(.LCPI0_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a1)
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4,
ta, mu +; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; ZVFH-NEXT: fsrmi a0, 3 +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_ceil_vv_nxv1f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v9, v0.t +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a0, 3 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %v = call <vscale x 1 x half> @llvm.vp.ceil.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl) ret <vscale x 1 x half> %v } define <vscale x 1 x half> @vp_ceil_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv1f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_ceil_vv_nxv1f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI1_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; ZVFH-NEXT: fsrmi a0, 3 +; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_ceil_vv_nxv1f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v9 +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5 +; ZVFHMIN-NEXT: fsrmi a0, 3 +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %v = call <vscale x 1 x half> @llvm.vp.ceil.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl) ret <vscale x 1 x half> %v } @@ -49,41 +95,81 @@ define <vscale x 1 x half> @vp_ceil_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, declare <vscale x 2 x half> @llvm.vp.ceil.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32) define <vscale x 2 x half> @vp_ceil_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: 
vp_ceil_vv_nxv2f16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_ceil_vv_nxv2f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI2_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; ZVFH-NEXT: fsrmi a0, 3 +; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_ceil_vv_nxv2f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v9, v0.t +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a0, 3 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %v = call <vscale x 2 x half> @llvm.vp.ceil.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl) ret <vscale x 2 x half> %v } define <vscale x 2 x half> @vp_ceil_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv2f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_ceil_vv_nxv2f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI3_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; ZVFH-NEXT: fsrmi a0, 3 +; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_ceil_vv_nxv2f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v9 +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vmflt.vf v0, 
v8, fa5 +; ZVFHMIN-NEXT: fsrmi a0, 3 +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %v = call <vscale x 2 x half> @llvm.vp.ceil.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl) ret <vscale x 2 x half> %v } @@ -91,41 +177,83 @@ define <vscale x 2 x half> @vp_ceil_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, declare <vscale x 4 x half> @llvm.vp.ceil.nxv4f16(<vscale x 4 x half>, <vscale x 4 x i1>, i32) define <vscale x 4 x half> @vp_ceil_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv4f16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_ceil_vv_nxv4f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI4_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; ZVFH-NEXT: fsrmi a0, 3 +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_ceil_vv_nxv4f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vmv1r.v v9, v0 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfabs.v v12, v10, v0.t +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a0, 3 +; ZVFHMIN-NEXT: vmv1r.v v0, v9 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v10, v12, v10, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret %v = call <vscale x 4 x half> @llvm.vp.ceil.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl) ret <vscale x 4 x half> %v } define <vscale x 4 x half> @vp_ceil_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv4f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI5_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m1, 
ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_ceil_vv_nxv4f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI5_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; ZVFH-NEXT: fsrmi a0, 3 +; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_ceil_vv_nxv4f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v10 +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5 +; ZVFHMIN-NEXT: fsrmi a0, 3 +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v10, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v10, v8, v10, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret %v = call <vscale x 4 x half> @llvm.vp.ceil.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl) ret <vscale x 4 x half> %v } @@ -133,43 +261,85 @@ define <vscale x 4 x half> @vp_ceil_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, declare <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, i32) define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv8f16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI6_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1) -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_ceil_vv_nxv8f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI6_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1) +; ZVFH-NEXT: vmv1r.v v10, v0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfabs.v v12, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; ZVFH-NEXT: fsrmi a0, 3 +; ZVFH-NEXT: vmv1r.v v0, v10 +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_ceil_vv_nxv8f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vmv1r.v v10, v0 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfabs.v v16, v12, v0.t +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a0, 3 +; ZVFHMIN-NEXT: vmv1r.v v0, v10 +; 
ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v12, v16, v12, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %v = call <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl) ret <vscale x 8 x half> %v } define <vscale x 8 x half> @vp_ceil_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv8f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI7_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI7_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_ceil_vv_nxv8f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI7_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; ZVFH-NEXT: fsrmi a0, 3 +; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_ceil_vv_nxv8f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v12 +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5 +; ZVFHMIN-NEXT: fsrmi a0, 3 +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v12, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v12, v8, v12, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %v = call <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl) ret <vscale x 8 x half> %v } @@ -177,43 +347,85 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, declare <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half>, <vscale x 16 x i1>, i32) define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv16f16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI8_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI8_0)(a1) -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_ceil_vv_nxv16f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI8_0) +; ZVFH-NEXT: 
flh fa5, %lo(.LCPI8_0)(a1) +; ZVFH-NEXT: vmv1r.v v12, v0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfabs.v v16, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; ZVFH-NEXT: fsrmi a0, 3 +; ZVFH-NEXT: vmv1r.v v0, v12 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_ceil_vv_nxv16f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vmv1r.v v12, v0 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a0, 3 +; ZVFHMIN-NEXT: vmv1r.v v0, v12 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %v = call <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl) ret <vscale x 16 x half> %v } define <vscale x 16 x half> @vp_ceil_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv16f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI9_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI9_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_ceil_vv_nxv16f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI9_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; ZVFH-NEXT: fsrmi a0, 3 +; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_ceil_vv_nxv16f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v16 +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5 +; ZVFHMIN-NEXT: fsrmi a0, 3 +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v16, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v16, v8, v16, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %v = call <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl) ret <vscale x 16 x half> %v } @@ -221,43 +433,182 @@ 
define <vscale x 16 x half> @vp_ceil_vv_nxv16f16_unmasked(<vscale x 16 x half> % declare <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half>, <vscale x 32 x i1>, i32) define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv32f16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI10_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI10_0)(a1) -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_ceil_vv_nxv32f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI10_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a1) +; ZVFH-NEXT: vmv1r.v v16, v0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfabs.v v24, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; ZVFH-NEXT: fsrmi a0, 3 +; ZVFH-NEXT: vmv1r.v v0, v16 +; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma +; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_ceil_vv_nxv32f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vmv1r.v v16, v0 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v17, v0, a2 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vmv1r.v v0, v17 +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t +; ZVFHMIN-NEXT: lui a2, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a2 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a2, 3 +; ZVFHMIN-NEXT: vmv1r.v v0, v17 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t +; ZVFHMIN-NEXT: fsrm a2 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24 +; ZVFHMIN-NEXT: bltu a0, a1, .LBB10_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB10_2: +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFHMIN-NEXT: vmv1r.v v8, v16 +; ZVFHMIN-NEXT: vmv1r.v 
v0, v16 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vmv1r.v v0, v8 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v8, v16, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a0, 3 +; ZVFHMIN-NEXT: vmv1r.v v0, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v24, v0.t +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %v = call <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl) ret <vscale x 32 x half> %v } define <vscale x 32 x half> @vp_ceil_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv32f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI11_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI11_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_ceil_vv_nxv32f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI11_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; ZVFH-NEXT: fsrmi a0, 3 +; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_ceil_vv_nxv32f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v16 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v16, v16, a2 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vmv1r.v v0, v16 +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t +; ZVFHMIN-NEXT: lui a2, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a2 +; ZVFHMIN-NEXT: vsetvli zero, zero, 
e32, m8, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a2, 3 +; ZVFHMIN-NEXT: vmv1r.v v0, v16 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t +; ZVFHMIN-NEXT: fsrm a2 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24 +; ZVFHMIN-NEXT: bltu a0, a1, .LBB11_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB11_2: +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfabs.v v24, v16 +; ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; ZVFHMIN-NEXT: fsrmi a0, 3 +; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %v = call <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl) ret <vscale x 32 x half> %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll index 5888252..9ea1394 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll @@ -2624,6 +2624,153 @@ define <vscale x 1 x i9> @vp_ctlz_zero_undef_nxv1i9(<vscale x 1 x i9> %va, <vsca %v = call <vscale x 1 x i9> @llvm.vp.ctlz.nxv1i9(<vscale x 1 x i9> %va, i1 true, <vscale x 1 x i1> %m, i32 %evl) ret <vscale x 1 x i9> %v } +define <vscale x 1 x i9> @vp_ctlo_nxv1i9(<vscale x 1 x i9> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlo_nxv1i9: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 7, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v8, v9, 23, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t +; CHECK-NEXT: li a0, 142 +; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vp_ctlo_nxv1i9: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vsll.vi v8, v8, 7, v0.t +; CHECK-ZVBB-NEXT: vnot.v v8, v8, v0.t +; CHECK-ZVBB-NEXT: vclz.v v8, v8, v0.t +; CHECK-ZVBB-NEXT: ret + %va.not = xor <vscale x 1 x i9> %va, splat (i9 -1) + %v = call <vscale x 1 x i9> @llvm.vp.ctlz.nxv1i9(<vscale x 1 x i9> %va.not, i1 false, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x i9> %v +} +define <vscale x 1 x i9> @vp_ctlo_zero_undef_nxv1i9(<vscale x 1 x i9> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlo_zero_undef_nxv1i9: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 511 +; CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, ma +; CHECK-NEXT: vxor.vx v8, v8, a1 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 7, v0.t +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v8, v9, 23, v0.t +; CHECK-NEXT: vsetvli zero, 
zero, e16, mf4, ta, ma +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t +; CHECK-NEXT: li a0, 142 +; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vp_ctlo_zero_undef_nxv1i9: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: li a1, 511 +; CHECK-ZVBB-NEXT: vsetvli a2, zero, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vxor.vx v8, v8, a1 +; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vsll.vi v8, v8, 7, v0.t +; CHECK-ZVBB-NEXT: vclz.v v8, v8, v0.t +; CHECK-ZVBB-NEXT: ret + %va.not = xor <vscale x 1 x i9> %va, splat (i9 -1) + %v = call <vscale x 1 x i9> @llvm.vp.ctlz.nxv1i9(<vscale x 1 x i9> %va.not, i1 true, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x i9> %v +} + +define <vscale x 1 x i9> @vp_ctlo_nxv1i9_vp_xor(<vscale x 1 x i9> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlo_nxv1i9_vp_xor: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 7, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v8, v9, 23, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t +; CHECK-NEXT: li a0, 142 +; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vp_ctlo_nxv1i9_vp_xor: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vsll.vi v8, v8, 7, v0.t +; CHECK-ZVBB-NEXT: vnot.v v8, v8, v0.t +; CHECK-ZVBB-NEXT: vclz.v v8, v8, v0.t +; CHECK-ZVBB-NEXT: ret + %va.not = call <vscale x 1 x i9> @llvm.vp.xor.nxv1i9(<vscale x 1 x i9> %va, <vscale x 1 x i9> splat (i9 -1), <vscale x 1 x i1> %m, i32 %evl) + %v = call <vscale x 1 x i9> @llvm.vp.ctlz.nxv1i9(<vscale x 1 x i9> %va.not, i1 false, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x i9> %v +} + +define <vscale x 1 x i9> @vp_ctlo_zero_undef_nxv1i9_vp_xor(<vscale x 1 x i9> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlo_zero_undef_nxv1i9_vp_xor: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 511 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vxor.vx v8, v8, a1, v0.t +; CHECK-NEXT: vsll.vi v8, v8, 7, v0.t +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v8, v9, 23, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t +; CHECK-NEXT: li a0, 142 +; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vp_ctlo_zero_undef_nxv1i9_vp_xor: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: li a1, 511 +; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vxor.vx v8, v8, a1, v0.t +; CHECK-ZVBB-NEXT: vsll.vi v8, v8, 7, v0.t +; CHECK-ZVBB-NEXT: vclz.v v8, v8, v0.t +; CHECK-ZVBB-NEXT: ret + %va.not = call <vscale x 1 x i9> @llvm.vp.xor.nxv1i9(<vscale x 1 x i9> %va, <vscale x 1 x i9> splat (i9 -1), <vscale x 1 x i1> %m, i32 %evl) + %v = call <vscale x 1 x i9> @llvm.vp.ctlz.nxv1i9(<vscale x 1 x i9> %va.not, i1 true, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x i9> %v +} + +define <vscale x 1 x i9> @vp_ctlo_zero_nxv1i9_unpredicated_ctlz_with_vp_xor(<vscale x 1 x i9> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlo_zero_nxv1i9_unpredicated_ctlz_with_vp_xor: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 511 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vxor.vx v8, v8, a1, v0.t +; CHECK-NEXT: vsetvli a0, zero, e16, 
mf4, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a1 +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vnsrl.wi v8, v9, 23 +; CHECK-NEXT: li a0, 142 +; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: li a0, 16 +; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vp_ctlo_zero_nxv1i9_unpredicated_ctlz_with_vp_xor: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: li a1, 511 +; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vxor.vx v8, v8, a1, v0.t +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vand.vx v8, v8, a1 +; CHECK-ZVBB-NEXT: vclz.v v8, v8 +; CHECK-ZVBB-NEXT: li a0, 7 +; CHECK-ZVBB-NEXT: vsub.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %va.not = call <vscale x 1 x i9> @llvm.vp.xor.nxv1i9(<vscale x 1 x i9> %va, <vscale x 1 x i9> splat (i9 -1), <vscale x 1 x i1> %m, i32 %evl) + %v = call <vscale x 1 x i9> @llvm.ctlz(<vscale x 1 x i9> %va.not, i1 false) + ret <vscale x 1 x i9> %v +} + ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; RV32: {{.*}} ; RV64: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll index 26a3e05..45334ea 100644 --- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll @@ -1,47 +1,93 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFHMIN declare <vscale x 1 x half> @llvm.vp.floor.nxv1f16(<vscale x 1 x half>, <vscale x 1 x i1>, i32) define <vscale x 1 x half> @vp_floor_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv1f16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_floor_nxv1f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI0_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; ZVFH-NEXT: fsrmi a0, 2 +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; 
ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_floor_nxv1f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v9, v0.t +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a0, 2 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %v = call <vscale x 1 x half> @llvm.vp.floor.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl) ret <vscale x 1 x half> %v } define <vscale x 1 x half> @vp_floor_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv1f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_floor_nxv1f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI1_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; ZVFH-NEXT: fsrmi a0, 2 +; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_floor_nxv1f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v9 +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5 +; ZVFHMIN-NEXT: fsrmi a0, 2 +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %v = call <vscale x 1 x half> @llvm.vp.floor.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl) ret <vscale x 1 x half> %v } @@ -49,41 +95,81 @@ define <vscale x 1 x half> @vp_floor_nxv1f16_unmasked(<vscale x 1 x half> %va, i declare <vscale x 2 x half> @llvm.vp.floor.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32) define <vscale x 2 x half> @vp_floor_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv2f16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; 
CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_floor_nxv2f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI2_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; ZVFH-NEXT: fsrmi a0, 2 +; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_floor_nxv2f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v9, v0.t +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a0, 2 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %v = call <vscale x 2 x half> @llvm.vp.floor.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl) ret <vscale x 2 x half> %v } define <vscale x 2 x half> @vp_floor_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv2f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_floor_nxv2f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI3_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; ZVFH-NEXT: fsrmi a0, 2 +; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_floor_nxv2f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v9 +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5 +; ZVFHMIN-NEXT: fsrmi a0, 2 +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: 
vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %v = call <vscale x 2 x half> @llvm.vp.floor.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl) ret <vscale x 2 x half> %v } @@ -91,41 +177,83 @@ define <vscale x 2 x half> @vp_floor_nxv2f16_unmasked(<vscale x 2 x half> %va, i declare <vscale x 4 x half> @llvm.vp.floor.nxv4f16(<vscale x 4 x half>, <vscale x 4 x i1>, i32) define <vscale x 4 x half> @vp_floor_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv4f16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_floor_nxv4f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI4_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; ZVFH-NEXT: fsrmi a0, 2 +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_floor_nxv4f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vmv1r.v v9, v0 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfabs.v v12, v10, v0.t +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a0, 2 +; ZVFHMIN-NEXT: vmv1r.v v0, v9 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v10, v12, v10, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret %v = call <vscale x 4 x half> @llvm.vp.floor.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl) ret <vscale x 4 x half> %v } define <vscale x 4 x half> @vp_floor_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv4f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI5_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_floor_nxv4f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI5_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; 
ZVFH-NEXT: fsrmi a0, 2 +; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_floor_nxv4f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v10 +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5 +; ZVFHMIN-NEXT: fsrmi a0, 2 +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v10, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v10, v8, v10, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret %v = call <vscale x 4 x half> @llvm.vp.floor.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl) ret <vscale x 4 x half> %v } @@ -133,43 +261,85 @@ define <vscale x 4 x half> @vp_floor_nxv4f16_unmasked(<vscale x 4 x half> %va, i declare <vscale x 8 x half> @llvm.vp.floor.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, i32) define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv8f16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI6_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1) -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_floor_nxv8f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI6_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1) +; ZVFH-NEXT: vmv1r.v v10, v0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfabs.v v12, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; ZVFH-NEXT: fsrmi a0, 2 +; ZVFH-NEXT: vmv1r.v v0, v10 +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_floor_nxv8f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vmv1r.v v10, v0 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfabs.v v16, v12, v0.t +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a0, 2 +; ZVFHMIN-NEXT: vmv1r.v v0, v10 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v12, v16, v12, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: 
vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %v = call <vscale x 8 x half> @llvm.vp.floor.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl) ret <vscale x 8 x half> %v } define <vscale x 8 x half> @vp_floor_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv8f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI7_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI7_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_floor_nxv8f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI7_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; ZVFH-NEXT: fsrmi a0, 2 +; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_floor_nxv8f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v12 +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5 +; ZVFHMIN-NEXT: fsrmi a0, 2 +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v12, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v12, v8, v12, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %v = call <vscale x 8 x half> @llvm.vp.floor.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl) ret <vscale x 8 x half> %v } @@ -177,43 +347,85 @@ define <vscale x 8 x half> @vp_floor_nxv8f16_unmasked(<vscale x 8 x half> %va, i declare <vscale x 16 x half> @llvm.vp.floor.nxv16f16(<vscale x 16 x half>, <vscale x 16 x i1>, i32) define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv16f16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI8_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI8_0)(a1) -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_floor_nxv16f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI8_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a1) +; ZVFH-NEXT: vmv1r.v v12, v0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfabs.v v16, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; ZVFH-NEXT: fsrmi a0, 2 +; ZVFH-NEXT: vmv1r.v v0, v12 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma 
+; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_floor_nxv16f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vmv1r.v v12, v0 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a0, 2 +; ZVFHMIN-NEXT: vmv1r.v v0, v12 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %v = call <vscale x 16 x half> @llvm.vp.floor.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl) ret <vscale x 16 x half> %v } define <vscale x 16 x half> @vp_floor_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv16f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI9_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI9_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_floor_nxv16f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI9_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; ZVFH-NEXT: fsrmi a0, 2 +; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_floor_nxv16f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v16 +; ZVFHMIN-NEXT: lui a0, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a0 +; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5 +; ZVFHMIN-NEXT: fsrmi a0, 2 +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v16, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v16, v8, v16, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %v = call <vscale x 16 x half> @llvm.vp.floor.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl) ret <vscale x 16 x half> %v } @@ -221,43 +433,182 @@ define <vscale x 16 x half> @vp_floor_nxv16f16_unmasked(<vscale x 16 x half> %va declare <vscale x 32 x half> @llvm.vp.floor.nxv32f16(<vscale x 32 x half>, <vscale x 32 x i1>, i32) define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv32f16: -; CHECK: # 
%bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI10_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI10_0)(a1) -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_floor_nxv32f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI10_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a1) +; ZVFH-NEXT: vmv1r.v v16, v0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfabs.v v24, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; ZVFH-NEXT: fsrmi a0, 2 +; ZVFH-NEXT: vmv1r.v v0, v16 +; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma +; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_floor_nxv32f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vmv1r.v v16, v0 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v17, v0, a2 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vmv1r.v v0, v17 +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t +; ZVFHMIN-NEXT: lui a2, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a2 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a2, 2 +; ZVFHMIN-NEXT: vmv1r.v v0, v17 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t +; ZVFHMIN-NEXT: fsrm a2 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24 +; ZVFHMIN-NEXT: bltu a0, a1, .LBB10_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB10_2: +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFHMIN-NEXT: vmv1r.v v8, v16 +; ZVFHMIN-NEXT: vmv1r.v v0, v16 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vmv1r.v v0, v8 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; 
ZVFHMIN-NEXT: vmflt.vf v8, v16, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a0, 2 +; ZVFHMIN-NEXT: vmv1r.v v0, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v24, v0.t +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %v = call <vscale x 32 x half> @llvm.vp.floor.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl) ret <vscale x 32 x half> %v } define <vscale x 32 x half> @vp_floor_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv32f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI11_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI11_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vp_floor_nxv32f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: lui a1, %hi(.LCPI11_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a1) +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; ZVFH-NEXT: fsrmi a0, 2 +; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; ZVFH-NEXT: fsrm a0 +; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vp_floor_nxv32f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v16 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v16, v16, a2 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vmv1r.v v0, v16 +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t +; ZVFHMIN-NEXT: lui a2, 307200 +; ZVFHMIN-NEXT: fmv.w.x fa5, a2 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t +; ZVFHMIN-NEXT: fsrmi a2, 2 +; ZVFHMIN-NEXT: vmv1r.v v0, v16 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t +; ZVFHMIN-NEXT: fsrm a2 +; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; 
ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24 +; ZVFHMIN-NEXT: bltu a0, a1, .LBB11_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB11_2: +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfabs.v v24, v16 +; ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; ZVFHMIN-NEXT: fsrmi a0, 2 +; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; ZVFHMIN-NEXT: fsrm a0 +; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %v = call <vscale x 32 x half> @llvm.vp.floor.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl) ret <vscale x 32 x half> %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll index 8201f18..b6bb037 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll @@ -1,38 +1,82 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFHMIN declare <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, i32) define <vscale x 1 x half> @vfma_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vv_nxv1f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vv_nxv1f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vfmadd.vv v9, v8, v10, v0.t +; ZVFH-NEXT: vmv1r.v v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vv_nxv1f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v12, v10, v11, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl) ret <vscale x 1 x half> %v } define <vscale x 1 x half> 
@vfma_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vv_nxv1f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vv_nxv1f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vfmadd.vv v8, v9, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vv_nxv1f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v12, v10, v11 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl) ret <vscale x 1 x half> %v } define <vscale x 1 x half> @vfma_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv1f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv1f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv1f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl) @@ -40,11 +84,25 @@ define <vscale x 1 x half> @vfma_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <v } define <vscale x 1 x half> @vfma_vf_nxv1f16_commute(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv1f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv1f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv1f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v9, v8, v11, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 1 x half> poison, half 
%b, i32 0 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl) @@ -52,11 +110,25 @@ define <vscale x 1 x half> @vfma_vf_nxv1f16_commute(<vscale x 1 x half> %va, hal } define <vscale x 1 x half> @vfma_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv1f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv1f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv1f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl) @@ -64,11 +136,25 @@ define <vscale x 1 x half> @vfma_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, ha } define <vscale x 1 x half> @vfma_vf_nxv1f16_unmasked_commute(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv1f16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv1f16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv1f16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl) @@ -78,32 +164,70 @@ define <vscale x 1 x half> @vfma_vf_nxv1f16_unmasked_commute(<vscale x 1 x half> declare <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x i1>, i32) define <vscale x 2 x half> @vfma_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) { 
-; CHECK-LABEL: vfma_vv_nxv2f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vv_nxv2f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfmadd.vv v9, v8, v10, v0.t +; ZVFH-NEXT: vmv1r.v v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vv_nxv2f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v12, v10, v11, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl) ret <vscale x 2 x half> %v } define <vscale x 2 x half> @vfma_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vv_nxv2f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vv_nxv2f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfmadd.vv v8, v9, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vv_nxv2f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v12, v10, v11 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl) ret <vscale x 2 x half> %v } define <vscale x 2 x half> @vfma_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv2f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv2f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv2f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl) @@ -111,11 +235,25 @@ define <vscale x 2 x half> @vfma_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <v } define <vscale x 2 x half> 
@vfma_vf_nxv2f16_commute(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv2f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv2f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv2f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v9, v8, v11, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl) @@ -123,11 +261,25 @@ define <vscale x 2 x half> @vfma_vf_nxv2f16_commute(<vscale x 2 x half> %va, hal } define <vscale x 2 x half> @vfma_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv2f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv2f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv2f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -135,11 +287,25 @@ define <vscale x 2 x half> @vfma_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, ha } define <vscale x 2 x half> @vfma_vf_nxv2f16_unmasked_commute(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv2f16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv2f16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv2f16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -149,32 +315,70 @@ define <vscale x 2 x half> @vfma_vf_nxv2f16_unmasked_commute(<vscale x 2 x half> declare <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x i1>, i32) define <vscale x 4 x half> @vfma_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vv_nxv4f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t -; CHECK-NEXT: vmv.v.v v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vv_nxv4f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfmadd.vv v9, v8, v10, v0.t +; ZVFH-NEXT: vmv.v.v v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vv_nxv4f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v14, v10, v12, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14 +; ZVFHMIN-NEXT: ret %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl) ret <vscale x 4 x half> %v } define <vscale x 4 x half> @vfma_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vv_nxv4f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfmadd.vv v8, v9, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vv_nxv4f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfmadd.vv v8, v9, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vv_nxv4f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v14, v10, v12 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14 +; ZVFHMIN-NEXT: ret %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl) ret <vscale x 4 x half> %v } define <vscale x 4 x half> @vfma_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv4f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv4f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: 
vfmadd.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv4f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v14, v12, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl) @@ -182,11 +386,25 @@ define <vscale x 4 x half> @vfma_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <v } define <vscale x 4 x half> @vfma_vf_nxv4f16_commute(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv4f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv4f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv4f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v14, v8, v12, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl) @@ -194,11 +412,25 @@ define <vscale x 4 x half> @vfma_vf_nxv4f16_commute(<vscale x 4 x half> %va, hal } define <vscale x 4 x half> @vfma_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv4f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv4f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv4f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v14, v12 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x 
i32> zeroinitializer %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl) @@ -206,11 +438,25 @@ define <vscale x 4 x half> @vfma_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, ha } define <vscale x 4 x half> @vfma_vf_nxv4f16_unmasked_commute(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv4f16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv4f16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv4f16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v14, v12 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl) @@ -220,32 +466,70 @@ define <vscale x 4 x half> @vfma_vf_nxv4f16_unmasked_commute(<vscale x 4 x half> declare <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, i32) define <vscale x 8 x half> @vfma_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vv_nxv8f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vv_nxv8f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfmadd.vv v10, v8, v12, v0.t +; ZVFH-NEXT: vmv.v.v v8, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vv_nxv8f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v20, v12, v16, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20 +; ZVFHMIN-NEXT: ret %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl) ret <vscale x 8 x half> %v } define <vscale x 8 x half> @vfma_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vv_nxv8f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfmadd.vv v8, v10, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vv_nxv8f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfmadd.vv v8, v10, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: 
vfma_vv_nxv8f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v20, v12, v16 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20 +; ZVFHMIN-NEXT: ret %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl) ret <vscale x 8 x half> %v } define <vscale x 8 x half> @vfma_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv8f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv8f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v10, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv8f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v12, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v20, v16, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl) @@ -253,11 +537,25 @@ define <vscale x 8 x half> @vfma_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <v } define <vscale x 8 x half> @vfma_vf_nxv8f16_commute(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv8f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv8f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v10, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv8f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v12, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v12 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v20, v8, v16, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl) @@ -265,11 +563,25 @@ define <vscale x 8 x half> @vfma_vf_nxv8f16_commute(<vscale x 8 x half> %va, hal } define <vscale x 8 x half> @vfma_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, half 
%b, <vscale x 8 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv8f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv8f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv8f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v12, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v20, v16 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl) @@ -277,11 +589,25 @@ define <vscale x 8 x half> @vfma_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, ha } define <vscale x 8 x half> @vfma_vf_nxv8f16_unmasked_commute(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv8f16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv8f16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv8f16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v12, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v20, v16 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl) @@ -291,32 +617,96 @@ define <vscale x 8 x half> @vfma_vf_nxv8f16_unmasked_commute(<vscale x 8 x half> declare <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, i32) define <vscale x 16 x half> @vfma_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vv_nxv16f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfmadd.vv v12, v8, v16, v0.t -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vv_nxv16f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfmadd.vv v12, v8, v16, v0.t +; ZVFH-NEXT: vmv.v.v v8, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vv_nxv16f16: +; ZVFHMIN: # %bb.0: +; 
ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl) ret <vscale x 16 x half> %v } define <vscale x 16 x half> @vfma_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vv_nxv16f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfmadd.vv v8, v12, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vv_nxv16f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfmadd.vv v8, v12, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vv_nxv16f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: ret %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl) ret <vscale x 16 x half> %v } define <vscale x 16 x half> @vfma_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv16f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv16f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv16f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v4, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, 
m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl) @@ -324,11 +714,25 @@ define <vscale x 16 x half> @vfma_vf_nxv16f16(<vscale x 16 x half> %va, half %b, } define <vscale x 16 x half> @vfma_vf_nxv16f16_commute(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv16f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv16f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv16f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v4, a1 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl) @@ -336,11 +740,38 @@ define <vscale x 16 x half> @vfma_vf_nxv16f16_commute(<vscale x 16 x half> %va, } define <vscale x 16 x half> @vfma_vf_nxv16f16_unmasked(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv16f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv16f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv16f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 2 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24 +; ZVFHMIN-NEXT: 
vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 2 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -348,11 +779,38 @@ define <vscale x 16 x half> @vfma_vf_nxv16f16_unmasked(<vscale x 16 x half> %va, } define <vscale x 16 x half> @vfma_vf_nxv16f16_unmasked_commute(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv16f16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv16f16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv16f16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 2 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 2 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -362,34 +820,397 @@ define <vscale x 16 x half> @vfma_vf_nxv16f16_unmasked_commute(<vscale x 16 x ha declare <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x i1>, i32) define <vscale x 32 x half> @vfma_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vv_nxv32f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vv_nxv32f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vl8re16.v v24, (a0) +; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFH-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFH-NEXT: vmv.v.v v8, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: 
vfma_vv_nxv32f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a3, 42 +; ZVFHMIN-NEXT: mul a2, a2, a3 +; ZVFHMIN-NEXT: sub sp, sp, a2 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x2a, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 42 * vlenb +; ZVFHMIN-NEXT: vmv1r.v v24, v0 +; ZVFHMIN-NEXT: vl8re16.v v0, (a0) +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a0, a2, 1 +; ZVFHMIN-NEXT: sub a3, a1, a0 +; ZVFHMIN-NEXT: sltu a4, a1, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs1r.v v24, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v24, v24, a2 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs1r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 25 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vmv8r.v v8, v16 +; ZVFHMIN-NEXT: vmv4r.v v24, v8 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a4, a2, 5 +; ZVFHMIN-NEXT: add a2, a4, a2 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a4, a2, 4 +; ZVFHMIN-NEXT: add a2, a4, a2 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v0, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl1r.v v0, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a1, a0, .LBB30_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a1, a0 +; ZVFHMIN-NEXT: .LBB30_2: +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 25 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a2, a0, 5 +; ZVFHMIN-NEXT: add a0, a2, a0 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: 
csrr a0, vlenb +; ZVFHMIN-NEXT: slli a2, a0, 4 +; ZVFHMIN-NEXT: add a0, a2, a0 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a2, a0, 5 +; ZVFHMIN-NEXT: add a0, a2, a0 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a2, a0, 5 +; ZVFHMIN-NEXT: add a0, a2, a0 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vmv.v.v v16, v8 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a1, 42 +; ZVFHMIN-NEXT: mul a0, a0, a1 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl) ret <vscale x 32 x half> %v } define <vscale x 32 x half> @vfma_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vv_nxv32f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vfmadd.vv v8, v16, v24 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vv_nxv32f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vl8re16.v v24, (a0) +; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFH-NEXT: vfmadd.vv v8, v16, v24 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vv_nxv32f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 5 +; ZVFHMIN-NEXT: sub sp, sp, a2 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: vmv8r.v v24, v16 +; ZVFHMIN-NEXT: vl8re16.v v16, (a0) +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a0, a2, 1 +; ZVFHMIN-NEXT: sub a3, a1, a0 +; ZVFHMIN-NEXT: sltu a4, a1, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v7 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size 
Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: bltu a1, a0, .LBB31_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a1, a0 +; ZVFHMIN-NEXT: .LBB31_2: +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmacc.vv v0, v16, v24 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl) ret <vscale x 32 x half> %v } define <vscale x 32 x half> @vfma_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv32f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv32f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv32f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 42 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x2a, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 42 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; 
ZVFHMIN-NEXT: slli a2, a1, 5 +; ZVFHMIN-NEXT: add a1, a2, a1 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 25 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a4, a2, 4 +; ZVFHMIN-NEXT: add a2, a4, a2 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a4, a2, 5 +; ZVFHMIN-NEXT: add a2, a4, a2 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a0, a1, .LBB32_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB32_2: +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 25 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 4 +; ZVFHMIN-NEXT: add a1, a2, a1 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 25 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 5 +; ZVFHMIN-NEXT: add a1, a2, a1 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, 
a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 25 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vmv.v.v v16, v8 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a1, 42 +; ZVFHMIN-NEXT: mul a0, a0, a1 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl) @@ -397,11 +1218,147 @@ define <vscale x 32 x half> @vfma_vf_nxv32f16(<vscale x 32 x half> %va, half %b, } define <vscale x 32 x half> @vfma_vf_nxv32f16_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vf_nxv32f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vf_nxv32f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfmadd.vf v8, fa0, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vf_nxv32f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 42 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x2a, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 42 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 5 +; ZVFHMIN-NEXT: add a1, a2, a1 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 25 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20 
+; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a4, a2, 4 +; ZVFHMIN-NEXT: add a2, a4, a2 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a4, a2, 5 +; ZVFHMIN-NEXT: add a2, a4, a2 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a0, a1, .LBB33_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB33_2: +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 25 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 4 +; ZVFHMIN-NEXT: add a1, a2, a1 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 5 +; ZVFHMIN-NEXT: add a1, a2, a1 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 25 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 25 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v16, v0.t +; ZVFHMIN-NEXT: vmv.v.v v16, v8 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a1, 42 +; ZVFHMIN-NEXT: mul a0, a0, a1 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: 
addi sp, sp, 16
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
 %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
@@ -409,11 +1366,104 @@ define <vscale x 32 x half> @vfma_vf_nxv32f16_commute(<vscale x 32 x half> %va,
 }
 define <vscale x 32 x half> @vfma_vf_nxv32f16_unmasked(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vf_nxv32f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vfmadd.vf v8, fa0, v16
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfma_vf_nxv32f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT: vfmadd.vf v8, fa0, v16
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfma_vf_nxv32f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 5
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v24, a1
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: li a2, 24
+; ZVFHMIN-NEXT: mul a1, a1, a2
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a1, a2, 1
+; ZVFHMIN-NEXT: sub a3, a0, a1
+; ZVFHMIN-NEXT: sltu a4, a0, a3
+; ZVFHMIN-NEXT: addi a4, a4, -1
+; ZVFHMIN-NEXT: and a3, a4, a3
+; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: vmset.m v7
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a2, a2, 4
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: li a4, 24
+; ZVFHMIN-NEXT: mul a2, a2, a4
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB34_2
+; ZVFHMIN-NEXT: # %bb.1:
+; ZVFHMIN-NEXT: mv a0, a1
+; ZVFHMIN-NEXT: .LBB34_2:
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 4
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: li a2, 24
+; ZVFHMIN-NEXT: mul a1, a1, a2
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v8
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v0
+; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
 %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
@@ -421,11 +1471,103 @@ define <vscale x 32 x half> @vfma_vf_nxv32f16_unmasked(<vscale x 32 x half> %va,
 }
 define <vscale x 32 x half> @vfma_vf_nxv32f16_unmasked_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vf_nxv32f16_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vfmadd.vf v8, fa0, v16
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfma_vf_nxv32f16_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT: vfmadd.vf v8, fa0, v16
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfma_vf_nxv32f16_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 5
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v24, a1
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: li a2, 24
+; ZVFHMIN-NEXT: mul a1, a1, a2
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a1, a2, 1
+; ZVFHMIN-NEXT: sub a3, a0, a1
+; ZVFHMIN-NEXT: sltu a4, a0, a3
+; ZVFHMIN-NEXT: addi a4, a4, -1
+; ZVFHMIN-NEXT: and a3, a4, a3
+; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: vmset.m v7
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a2, a2, 4
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: li a4, 24
+; ZVFHMIN-NEXT: mul a2, a2, a4
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB35_2
+; ZVFHMIN-NEXT: # %bb.1:
+; ZVFHMIN-NEXT: mv a0, a1
+; ZVFHMIN-NEXT: .LBB35_2:
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 4
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: li a2, 24
+; ZVFHMIN-NEXT: mul a1, a1, a2
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v16
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
 %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
@@ -1274,34 +2416,82 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
 declare <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half>, <vscale x 1 x i1>, i32)
 define <vscale x 1 x half> @vfmsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vv_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfmsub.vv v9, v8, v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vv_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfmsub.vv v9, v8, v10, v0.t
+; ZVFH-NEXT: vmv1r.v v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vv_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v10, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
 %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
 %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
 ret <vscale x 1 x half> %v
 }
 define <vscale x 1 x half> @vfmsub_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vv_nxv1f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfmsub.vv v8, v9, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vv_nxv1f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfmsub.vv v8, v9, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vv_nxv1f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v10, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
 %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
 %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
 ret <vscale x 1 x half> %v
 }
 define <vscale x 1 x half> @vfmsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vf_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vf_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfmsub.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vf_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
@@ -1310,11 +2500,29 @@ define <vscale x 1 x half> @vfmsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
 }
 define <vscale x 1 x half> @vfmsub_vf_nxv1f16_commute(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vf_nxv1f16_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vf_nxv1f16_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfmsub.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vf_nxv1f16_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v8, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
@@ -1323,11 +2531,29 @@ define <vscale x 1 x half> @vfmsub_vf_nxv1f16_commute(<vscale x 1 x half> %va, h
 }
 define <vscale x 1 x half> @vfmsub_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vf_nxv1f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfmsub.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vf_nxv1f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfmsub.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vf_nxv1f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -1336,11 +2562,29 @@ define <vscale x 1 x half> @vfmsub_vf_nxv1f16_unmasked(<vscale x 1 x half> %va,
 }
 define <vscale x 1 x half> @vfmsub_vf_nxv1f16_unmasked_commute(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vf_nxv1f16_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfmsub.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vf_nxv1f16_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfmsub.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vf_nxv1f16_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -1349,12 +2593,28 @@ define <vscale x 1 x half> @vfmsub_vf_nxv1f16_unmasked_commute(<vscale x 1 x hal
 }
 define <vscale x 1 x half> @vfnmadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vv_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vv_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v9, v8, v10, v0.t
+; ZVFH-NEXT: vmv1r.v v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vv_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v10, v9, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
 %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
 %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
 %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negb, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
@@ -1362,11 +2622,27 @@ define <vscale x 1 x half> @vfnmadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale
 }
 define <vscale x 1 x half> @vfnmadd_vv_nxv1f16_commuted(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vv_nxv1f16_commuted:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vv_nxv1f16_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v9, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vv_nxv1f16_commuted:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v10, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
 %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
 %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negb, <vscale x 1 x half> %va, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
@@ -1374,11 +2650,27 @@ define <vscale x 1 x half> @vfnmadd_vv_nxv1f16_commuted(<vscale x 1 x half> %va,
 }
 define <vscale x 1 x half> @vfnmadd_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vv_nxv1f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vv_nxv1f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v9, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vv_nxv1f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v10, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
 %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
 %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negb, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -1386,11 +2678,27 @@ define <vscale x 1 x half> @vfnmadd_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 }
 define <vscale x 1 x half> @vfnmadd_vv_nxv1f16_unmasked_commuted(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vv_nxv1f16_unmasked_commuted:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vv_nxv1f16_unmasked_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v9, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vv_nxv1f16_unmasked_commuted:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v10, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
 %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
 %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negb, <vscale x 1 x half> %va, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -1398,11 +2706,30 @@ define <vscale x 1 x half> @vfnmadd_vv_nxv1f16_unmasked_commuted(<vscale x 1 x h
 }
 define <vscale x 1 x half> @vfnmadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
@@ -1412,11 +2739,30 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
 }
 define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_commute(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv1f16_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv1f16_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv1f16_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v8, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
@@ -1426,11 +2772,30 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_commute(<vscale x 1 x half> %va,
 }
 define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv1f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv1f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv1f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -1440,11 +2805,30 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_unmasked(<vscale x 1 x half> %va,
 }
 define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_unmasked_commute(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv1f16_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv1f16_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv1f16_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -1454,11 +2838,30 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_unmasked_commute(<vscale x 1 x ha
 }
 define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv1f16_neg_splat:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv1f16_neg_splat:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv1f16_neg_splat:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v10, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -1468,11 +2871,30 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat(<vscale x 1 x half> %va
 }
 define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat_commute(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv1f16_neg_splat_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv1f16_neg_splat_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv1f16_neg_splat_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v10, v9, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -1482,11 +2904,30 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat_commute(<vscale x 1 x h
 }
 define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat_unmasked(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv1f16_neg_splat_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv1f16_neg_splat_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv1f16_neg_splat_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v10, v9, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -1496,11 +2937,30 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat_unmasked(<vscale x 1 x
 }
 define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat_unmasked_commute(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv1f16_neg_splat_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv1f16_neg_splat_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv1f16_neg_splat_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v10, v9, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -1510,12 +2970,28 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat_unmasked_commute(<vscal
 }
 define <vscale x 1 x half> @vfnmsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vv_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vv_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v9, v8, v10, v0.t
+; ZVFH-NEXT: vmv1r.v v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vv_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v10, v9, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
 %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
 %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
 %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negb, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
@@ -1523,11 +2999,27 @@ define <vscale x 1 x half> @vfnmsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale
 }
 define <vscale x 1 x half> @vfnmsub_vv_nxv1f16_commuted(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vv_nxv1f16_commuted:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vv_nxv1f16_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v9, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vv_nxv1f16_commuted:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v10, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
 %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
 %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negb, <vscale x 1 x half> %va, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
@@ -1535,11 +3027,27 @@ define <vscale x 1 x half> @vfnmsub_vv_nxv1f16_commuted(<vscale x 1 x half> %va,
 }
 define <vscale x 1 x half> @vfnmsub_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vv_nxv1f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vv_nxv1f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v9, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vv_nxv1f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v10, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
 %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
 %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negb, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -1547,11 +3055,27 @@ define <vscale x 1 x half> @vfnmsub_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 }
 define <vscale x 1 x half> @vfnmsub_vv_nxv1f16_unmasked_commuted(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vv_nxv1f16_unmasked_commuted:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vv_nxv1f16_unmasked_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v9, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vv_nxv1f16_unmasked_commuted:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v10, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
 %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
 %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negb, <vscale x 1 x half> %va, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -1559,11 +3083,29 @@ define <vscale x 1 x half> @vfnmsub_vv_nxv1f16_unmasked_commuted(<vscale x 1 x h
 }
 define <vscale x 1 x half> @vfnmsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v11, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
@@ -1572,11 +3114,29 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
 }
 define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_commute(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv1f16_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv1f16_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv1f16_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v11, v9, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v11
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
@@ -1585,11 +3145,29 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_commute(<vscale x 1 x half> %va,
 }
 define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv1f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv1f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv1f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v11, v8
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -1598,11 +3176,29 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_unmasked(<vscale x 1 x half> %va,
 }
 define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_unmasked_commute(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv1f16_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv1f16_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv1f16_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v11, v8
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -1611,11 +3207,29 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_unmasked_commute(<vscale x 1 x ha
 }
 define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv1f16_neg_splat:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv1f16_neg_splat:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv1f16_neg_splat:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v11, v9, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v11
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -1624,11 +3238,29 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat(<vscale x 1 x half> %va
 }
 define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat_commute(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv1f16_neg_splat_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv1f16_neg_splat_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv1f16_neg_splat_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v11, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -1637,11 +3269,29 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat_commute(<vscale x 1 x h
 }
 define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat_unmasked(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv1f16_neg_splat_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv1f16_neg_splat_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv1f16_neg_splat_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v11, v10
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -1650,11 +3300,29 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat_unmasked(<vscale x 1 x
 }
 define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute(<vscale x 1 x half> %va, half %b, <vscale x 1 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v11, v10
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
 %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -1665,34 +3333,82 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute(<vscal
 declare <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 define <vscale x 2 x half> @vfmsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vv_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfmsub.vv v9, v8, v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vv_nxv2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfmsub.vv v9, v8, v10, v0.t
+; ZVFH-NEXT: vmv1r.v v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vv_nxv2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v10, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
 %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
 %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl)
 ret <vscale x 2 x half> %v
 }
 define <vscale x 2 x half> @vfmsub_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vv_nxv2f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfmsub.vv v8, v9, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vv_nxv2f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfmsub.vv v8, v9, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vv_nxv2f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v10, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
 %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
 %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
 ret <vscale x 2 x half> %v
 }
 define <vscale x 2 x half> @vfmsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vf_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vf_nxv2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfmsub.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vf_nxv2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
 %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
@@ -1701,11 +3417,29 @@ define <vscale x 2 x half> @vfmsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b,
 }
 define <vscale x 2 x half> @vfmsub_vf_nxv2f16_commute(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vf_nxv2f16_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vf_nxv2f16_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfmsub.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vf_nxv2f16_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v8, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
 %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
@@ -1714,11 +3448,29 @@ define <vscale x 2 x half> @vfmsub_vf_nxv2f16_commute(<vscale x 2 x half> %va, h
 }
 define <vscale x 2 x half> @vfmsub_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vf_nxv2f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfmsub.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vf_nxv2f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfmsub.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vf_nxv2f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
 %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
@@ -1727,11 +3479,29 @@ define <vscale x 2 x half> @vfmsub_vf_nxv2f16_unmasked(<vscale x 2 x half> %va,
 }
 define <vscale x 2 x half> @vfmsub_vf_nxv2f16_unmasked_commute(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vf_nxv2f16_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfmsub.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vf_nxv2f16_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfmsub.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vf_nxv2f16_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
 %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
 %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
@@ -1740,12 +3510,28 @@ define <vscale x 2 x half> @vfmsub_vf_nxv2f16_unmasked_commute(<vscale x 2 x hal
 }
 define <vscale x 2 x half> @vfnmadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vv_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vv_nxv2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v9, v8, v10, v0.t
+; ZVFH-NEXT: vmv1r.v v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vv_nxv2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v10, v9, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
 %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
 %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
 %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negb, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl)
@@ -1753,11 +3539,27 @@ define <vscale x 2 x half> @vfnmadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale
 }
 define <vscale x 2 x half> @vfnmadd_vv_nxv2f16_commuted(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vv_nxv2f16_commuted:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vv_nxv2f16_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v9, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vv_nxv2f16_commuted:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v10, v11, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
 %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
 %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
 %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negb, <vscale x 2 x half> %va, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl)
@@ -1765,11 +3567,27 @@ define <vscale x 2 x half> @vfnmadd_vv_nxv2f16_commuted(<vscale x 2 x half> %va,
 }
 define <vscale x 2 x half> @vfnmadd_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vv_nxv2f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vv_nxv2f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v9, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vv_nxv2f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v9, v10, v11
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+;
ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl) %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negb, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -1777,11 +3595,27 @@ define <vscale x 2 x half> @vfnmadd_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, } define <vscale x 2 x half> @vfnmadd_vv_nxv2f16_unmasked_commuted(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vv_nxv2f16_unmasked_commuted: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v9, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vv_nxv2f16_unmasked_commuted: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v9, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vv_nxv2f16_unmasked_commuted: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1 +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v9, v10, v11 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl) %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negb, <vscale x 2 x half> %va, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -1789,11 +3623,30 @@ define <vscale x 2 x half> @vfnmadd_vv_nxv2f16_unmasked_commuted(<vscale x 2 x h } define <vscale x 2 x half> @vfnmadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv2f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv2f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv2f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 
2 x half> poison, <vscale x 2 x i32> zeroinitializer %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl) @@ -1803,11 +3656,30 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b, } define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_commute(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv2f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv2f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv2f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v9, v8, v11, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl) @@ -1817,11 +3689,30 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_commute(<vscale x 2 x half> %va, } define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv2f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv2f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv2f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1 +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -1831,11 +3722,30 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, } define <vscale x 2 x half> 
@vfnmadd_vf_nxv2f16_unmasked_commute(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv2f16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv2f16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv2f16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1 +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v12, v9, v11 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -1845,11 +3755,30 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_unmasked_commute(<vscale x 2 x ha } define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv2f16_neg_splat: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv2f16_neg_splat: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv2f16_neg_splat: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v9, v10, v11, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl) @@ -1859,11 +3788,30 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat(<vscale x 2 x half> %va } define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat_commute(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv2f16_neg_splat_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: 
vfnmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv2f16_neg_splat_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv2f16_neg_splat_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v10, v9, v11, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl) @@ -1873,11 +3821,30 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat_commute(<vscale x 2 x h } define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat_unmasked(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv2f16_neg_splat_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv2f16_neg_splat_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv2f16_neg_splat_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1 +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v10, v9, v11 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -1887,11 +3854,30 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat_unmasked(<vscale x 2 x } define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat_unmasked_commute(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv2f16_neg_splat_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv2f16_neg_splat_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: 
vfnmadd_vf_nxv2f16_neg_splat_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1 +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v10, v9, v11 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -1901,12 +3887,28 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat_unmasked_commute(<vscal } define <vscale x 2 x half> @vfnmsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vv_nxv2f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vv_nxv2f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmadd.vv v9, v8, v10, v0.t +; ZVFH-NEXT: vmv1r.v v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vv_nxv2f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v10, v9, v11, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl) %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl) %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negb, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl) @@ -1914,11 +3916,27 @@ define <vscale x 2 x half> @vfnmsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale } define <vscale x 2 x half> @vfnmsub_vv_nxv2f16_commuted(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vv_nxv2f16_commuted: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vv_nxv2f16_commuted: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vv_nxv2f16_commuted: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, 
zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v9, v10, v11, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl) %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl) %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negb, <vscale x 2 x half> %va, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl) @@ -1926,11 +3944,27 @@ define <vscale x 2 x half> @vfnmsub_vv_nxv2f16_commuted(<vscale x 2 x half> %va, } define <vscale x 2 x half> @vfnmsub_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vv_nxv2f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v9, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vv_nxv2f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v9, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vv_nxv2f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1 +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v9, v10, v11 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl) %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negb, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -1938,11 +3972,27 @@ define <vscale x 2 x half> @vfnmsub_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, } define <vscale x 2 x half> @vfnmsub_vv_nxv2f16_unmasked_commuted(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vv_nxv2f16_unmasked_commuted: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v9, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vv_nxv2f16_unmasked_commuted: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v9, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vv_nxv2f16_unmasked_commuted: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1 +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v9, v10, v11 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret 
%negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl) %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negb, <vscale x 2 x half> %va, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -1950,11 +4000,29 @@ define <vscale x 2 x half> @vfnmsub_vv_nxv2f16_unmasked_commuted(<vscale x 2 x h } define <vscale x 2 x half> @vfnmsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv2f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv2f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv2f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v9, v11, v8, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl) @@ -1963,11 +4031,29 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b, } define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_commute(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv2f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv2f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv2f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v11, v9, v8, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v11 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, 
i32 %evl) @@ -1976,11 +4062,29 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_commute(<vscale x 2 x half> %va, } define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv2f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv2f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv2f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v9, v11, v8 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -1989,11 +4093,29 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, } define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_unmasked_commute(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv2f16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv2f16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv2f16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v9, v11, v8 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -2002,11 +4124,29 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_unmasked_commute(<vscale x 2 x ha } define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv2f16_neg_splat: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: 
vfnmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv2f16_neg_splat: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv2f16_neg_splat: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v11, v9, v10, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v11 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl) @@ -2015,11 +4155,29 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat(<vscale x 2 x half> %va } define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat_commute(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv2f16_neg_splat_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv2f16_neg_splat_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv2f16_neg_splat_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v9, v11, v10, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl) @@ -2028,11 +4186,29 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat_commute(<vscale x 2 x h } define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat_unmasked(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv2f16_neg_splat_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv2f16_neg_splat_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv2f16_neg_splat_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, 
fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v9, v11, v10 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -2041,11 +4217,29 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat_unmasked(<vscale x 2 x } define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute(<vscale x 2 x half> %va, half %b, <vscale x 2 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v9, v11, v10 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl) @@ -2056,34 +4250,82 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute(<vscal declare <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half>, <vscale x 4 x i1>, i32) define <vscale x 4 x half> @vfmsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vv_nxv4f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfmsub.vv v9, v8, v10, v0.t -; CHECK-NEXT: vmv.v.v v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vv_nxv4f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfmsub.vv v9, v8, v10, v0.t +; ZVFH-NEXT: vmv.v.v v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vv_nxv4f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: 
vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v14, v10, v12, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14 +; ZVFHMIN-NEXT: ret %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl) %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl) ret <vscale x 4 x half> %v } define <vscale x 4 x half> @vfmsub_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vv_nxv4f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfmsub.vv v8, v9, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vv_nxv4f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfmsub.vv v8, v9, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vv_nxv4f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v14, v10, v12 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14 +; ZVFHMIN-NEXT: ret %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl) ret <vscale x 4 x half> %v } define <vscale x 4 x half> @vfmsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vf_nxv4f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vf_nxv4f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vf_nxv4f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v14, v12, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl) @@ -2092,11 +4334,29 @@ define <vscale x 4 x half> @vfmsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b, } define <vscale x 4 x half> @vfmsub_vf_nxv4f16_commute(<vscale x 4 x half> %va, half 
%b, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vf_nxv4f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vf_nxv4f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vf_nxv4f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v14, v8, v12, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl) @@ -2105,11 +4365,29 @@ define <vscale x 4 x half> @vfmsub_vf_nxv4f16_commute(<vscale x 4 x half> %va, h } define <vscale x 4 x half> @vfmsub_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vf_nxv4f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vf_nxv4f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfmsub.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vf_nxv4f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v14, v12 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl) @@ -2118,11 +4396,29 @@ define <vscale x 4 x half> @vfmsub_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, } define <vscale x 4 x half> @vfmsub_vf_nxv4f16_unmasked_commute(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vf_nxv4f16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfmsub.vf v8, fa0, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vf_nxv4f16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfmsub.vf v8, fa0, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: 
vfmsub_vf_nxv4f16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v10, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v14, v12 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl) @@ -2131,12 +4427,28 @@ define <vscale x 4 x half> @vfmsub_vf_nxv4f16_unmasked_commute(<vscale x 4 x hal } define <vscale x 4 x half> @vfnmadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vv_nxv4f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t -; CHECK-NEXT: vmv.v.v v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vv_nxv4f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfnmadd.vv v9, v8, v10, v0.t +; ZVFH-NEXT: vmv.v.v v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vv_nxv4f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v10, v14, v12, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl) %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl) %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negb, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl) @@ -2144,11 +4456,27 @@ define <vscale x 4 x half> @vfnmadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale } define <vscale x 4 x half> @vfnmadd_vv_nxv4f16_commuted(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vv_nxv4f16_commuted: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vv_nxv4f16_commuted: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vv_nxv4f16_commuted: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v14, v10, v12, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl) %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl) %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negb, <vscale x 4 x half> %va, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl) @@ -2156,11 +4484,27 @@ define <vscale x 4 x half> @vfnmadd_vv_nxv4f16_commuted(<vscale x 4 x half> %va, } define <vscale x 4 x half> @vfnmadd_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vv_nxv4f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v9, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vv_nxv4f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v9, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vv_nxv4f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1 +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v14, v10, v12 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl) %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negb, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl) @@ -2168,11 +4512,27 @@ define <vscale x 4 x half> @vfnmadd_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, } define <vscale x 4 x half> @vfnmadd_vv_nxv4f16_unmasked_commuted(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vv_nxv4f16_unmasked_commuted: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v9, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vv_nxv4f16_unmasked_commuted: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v9, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vv_nxv4f16_unmasked_commuted: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vxor.vx v9, v9, a1 +; ZVFHMIN-NEXT: vxor.vx v10, v10, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v14, v10, v12 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale 
x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
%negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
%v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negb, <vscale x 4 x half> %va, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -2180,11 +4540,30 @@ define <vscale x 4 x half> @vfnmadd_vv_nxv4f16_unmasked_commuted(<vscale x 4 x h
}

define <vscale x 4 x half> @vfnmadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv4f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v14, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
@@ -2194,11 +4573,30 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b,
}

define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_commute(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv4f16_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv4f16_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv4f16_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v14, v8, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
@@ -2208,11 +4606,30 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_commute(<vscale x 4 x half> %va,
}

define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv4f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv4f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv4f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v14, v12
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -2222,11 +4639,30 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_unmasked(<vscale x 4 x half> %va,
}

define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_unmasked_commute(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv4f16_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv4f16_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv4f16_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v14, v12
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -2236,11 +4672,30 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_unmasked_commute(<vscale x 4 x ha
}

define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv4f16_neg_splat:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv4f16_neg_splat:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv4f16_neg_splat:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v14, v10, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -2250,11 +4705,30 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat(<vscale x 4 x half> %va
}

define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat_commute(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv4f16_neg_splat_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv4f16_neg_splat_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv4f16_neg_splat_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v10, v14, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -2264,11 +4738,30 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat_commute(<vscale x 4 x h
}

define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat_unmasked(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv4f16_neg_splat_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv4f16_neg_splat_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv4f16_neg_splat_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v10, v14, v12
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -2278,11 +4771,30 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat_unmasked(<vscale x 4 x
}

define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat_unmasked_commute(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv4f16_neg_splat_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv4f16_neg_splat_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv4f16_neg_splat_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v10, v14, v12
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -2292,12 +4804,28 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat_unmasked_commute(<vscal
}

define <vscale x 4 x half> @vfnmsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vv_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT: vmv.v.v v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vv_nxv4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v9, v8, v10, v0.t
+; ZVFH-NEXT: vmv.v.v v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vv_nxv4f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v10, v14, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
%negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
%negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl)
%v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negb, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl)
@@ -2305,11 +4833,27 @@ define <vscale x 4 x half> @vfnmsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale
}

define <vscale x 4 x half> @vfnmsub_vv_nxv4f16_commuted(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vv_nxv4f16_commuted:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vv_nxv4f16_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v9, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vv_nxv4f16_commuted:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v14, v10, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14
+; ZVFHMIN-NEXT: ret
%negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
%negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl)
%v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negb, <vscale x 4 x half> %va, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl)
@@ -2317,11 +4861,27 @@ define <vscale x 4 x half> @vfnmsub_vv_nxv4f16_commuted(<vscale x 4 x half> %va,
}

define <vscale x 4 x half> @vfnmsub_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vv_nxv4f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vv_nxv4f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v9, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vv_nxv4f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v14, v10, v12
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14
+; ZVFHMIN-NEXT: ret
%negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
%negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
%v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negb, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -2329,11 +4889,27 @@ define <vscale x 4 x half> @vfnmsub_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
}

define <vscale x 4 x half> @vfnmsub_vv_nxv4f16_unmasked_commuted(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vv_nxv4f16_unmasked_commuted:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v9, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vv_nxv4f16_unmasked_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v9, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vv_nxv4f16_unmasked_commuted:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v9, v9, a1
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v14, v10, v12
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14
+; ZVFHMIN-NEXT: ret
%negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
%negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
%v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negb, <vscale x 4 x half> %va, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -2341,11 +4917,29 @@ define <vscale x 4 x half> @vfnmsub_vv_nxv4f16_unmasked_commuted(<vscale x 4 x h
}

define <vscale x 4 x half> @vfnmsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv4f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v12, v14, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
@@ -2354,11 +4948,29 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b,
}

define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_commute(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv4f16_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv4f16_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv4f16_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v8, v14, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
@@ -2367,11 +4979,29 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_commute(<vscale x 4 x half> %va,
}

define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv4f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv4f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv4f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v12, v14
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -2380,11 +5010,29 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_unmasked(<vscale x 4 x half> %va,
}

define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_unmasked_commute(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv4f16_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv4f16_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv4f16_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v12, v14
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -2393,11 +5041,29 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_unmasked_commute(<vscale x 4 x ha
}

define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv4f16_neg_splat:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv4f16_neg_splat:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv4f16_neg_splat:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v14, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -2406,11 +5072,29 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat(<vscale x 4 x half> %va
}

define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat_commute(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv4f16_neg_splat_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv4f16_neg_splat_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv4f16_neg_splat_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v14, v12, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -2419,11 +5103,29 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat_commute(<vscale x 4 x h
}

define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat_unmasked(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv4f16_neg_splat_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv4f16_neg_splat_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv4f16_neg_splat_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v14, v12, v10
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -2432,11 +5134,29 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat_unmasked(<vscale x 4 x
}

define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute(<vscale x 4 x half> %va, half %b, <vscale x 4 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v10, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v14, v12, v10
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v14
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -2447,34 +5167,82 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute(<vscal
}

declare <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, i32)

define <vscale x 8 x half> @vfmsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vv_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfmsub.vv v10, v8, v12, v0.t
-; CHECK-NEXT: vmv.v.v v8, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vv_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfmsub.vv v10, v8, v12, v0.t
+; ZVFH-NEXT: vmv.v.v v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vv_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v20, v12, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20
+; ZVFHMIN-NEXT: ret
%negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
%v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x half> %v
}

define <vscale x 8 x half> @vfmsub_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vv_nxv8f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfmsub.vv v8, v10, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vv_nxv8f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfmsub.vv v8, v10, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vv_nxv8f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v12, v12, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v20, v12, v16
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20
+; ZVFHMIN-NEXT: ret
%negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
%v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x half> %v
}

define <vscale x 8 x half> @vfmsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vf_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vf_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfmsub.vf v8, fa0, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vf_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v24, v20, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
@@ -2483,11 +5251,29 @@ define <vscale x 8 x half> @vfmsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
}

define <vscale x 8 x half> @vfmsub_vf_nxv8f16_commute(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vf_nxv8f16_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vf_nxv8f16_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfmsub.vf v8, fa0, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vf_nxv8f16_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v12
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v20, v8, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
@@ -2496,11 +5282,29 @@ define <vscale x 8 x half> @vfmsub_vf_nxv8f16_commute(<vscale x 8 x half> %va, h
}

define <vscale x 8 x half> @vfmsub_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vf_nxv8f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfmsub.vf v8, fa0, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vf_nxv8f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfmsub.vf v8, fa0, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vf_nxv8f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v24, v20, v16
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -2509,11 +5313,29 @@ define <vscale x 8 x half> @vfmsub_vf_nxv8f16_unmasked(<vscale x 8 x half> %va,
}

define <vscale x 8 x half> @vfmsub_vf_nxv8f16_unmasked_commute(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfmsub_vf_nxv8f16_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfmsub.vf v8, fa0, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfmsub_vf_nxv8f16_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfmsub.vf v8, fa0, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfmsub_vf_nxv8f16_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v24, v20, v16
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -2522,12 +5344,28 @@ define <vscale x 8 x half> @vfmsub_vf_nxv8f16_unmasked_commute(<vscale x 8 x hal
}

define <vscale x 8 x half> @vfnmadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vv_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vv v10, v8, v12, v0.t
-; CHECK-NEXT: vmv.v.v v8, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vv_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v10, v8, v12, v0.t
+; ZVFH-NEXT: vmv.v.v v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vv_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v20, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
%negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
%negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
%v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negb, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
@@ -2535,11 +5373,27 @@ define <vscale x 8 x half> @vfnmadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
}

define <vscale x 8 x half> @vfnmadd_vv_nxv8f16_commuted(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vv_nxv8f16_commuted:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vv_nxv8f16_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v10, v12, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vv_nxv8f16_commuted:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v20, v12, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20
+; ZVFHMIN-NEXT: ret
%negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
%negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
%v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negb, <vscale x 8 x half> %va, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
@@ -2547,11 +5401,27 @@ define <vscale x 8 x half> @vfnmadd_vv_nxv8f16_commuted(<vscale x 8 x half> %va,
}

define <vscale x 8 x half> @vfnmadd_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vv_nxv8f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v10, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vv_nxv8f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v10, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vv_nxv8f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vxor.vx v12, v12, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v20, v12, v16
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20
+; ZVFHMIN-NEXT: ret
%negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
%negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
%v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negb, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -2559,11 +5429,27 @@ define <vscale x 8 x half> @vfnmadd_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
}

define <vscale x 8 x half> @vfnmadd_vv_nxv8f16_unmasked_commuted(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vv_nxv8f16_unmasked_commuted:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v10, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vv_nxv8f16_unmasked_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v10, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vv_nxv8f16_unmasked_commuted:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vxor.vx v12, v12, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v20, v12, v16
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20
+; ZVFHMIN-NEXT: ret
%negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
%negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
%v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negb, <vscale x 8 x half> %va, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -2571,11 +5457,30 @@ define <vscale x 8 x half> @vfnmadd_vv_nxv8f16_unmasked_commuted(<vscale x 8 x h
}

define <vscale x 8 x half> @vfnmadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v24, v20, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
@@ -2585,11 +5490,30 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
}

define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_commute(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv8f16_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv8f16_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv8f16_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v12
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v20, v8, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
@@ -2599,11 +5523,30 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_commute(<vscale x 8 x half> %va,
}

define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv8f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv8f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv8f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v24, v20, v16
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -2613,11 +5556,30 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_unmasked(<vscale x 8 x half> %va,
}

define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_unmasked_commute(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv8f16_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv8f16_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv8f16_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v24, v20, v16
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -2627,11 +5589,30 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_unmasked_commute(<vscale x 8 x ha
}

define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv8f16_neg_splat:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv8f16_neg_splat:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv8f16_neg_splat:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v20, v12, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
@@ -2641,11 +5622,30 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat(<vscale x 8 x half> %va
}

define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat_commute(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv8f16_neg_splat_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv8f16_neg_splat_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv8f16_neg_splat_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v20, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
@@ -2655,11 +5655,30 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat_commute(<vscale x 8 x h
}

define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat_unmasked(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv8f16_neg_splat_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv8f16_neg_splat_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv8f16_neg_splat_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v12, v12, a1
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v20, v16
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -2669,11 +5688,30 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat_unmasked(<vscale x 8 x
}

define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat_unmasked_commute(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmadd_vf_nxv8f16_neg_splat_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vf v8, fa0, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmadd_vf_nxv8f16_neg_splat_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv8f16_neg_splat_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v12, v12, a1
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v20, v16
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -2683,12 +5721,28 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat_unmasked_commute(<vscal
}

define <vscale x 8 x half> @vfnmsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vv_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vv v10, v8, v12, v0.t
-; CHECK-NEXT: vmv.v.v v8, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vv_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v10, v8, v12, v0.t
+; ZVFH-NEXT: vmv.v.v v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vv_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v12, v20, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
%negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
%negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
%v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negb, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
@@ -2696,11 +5750,27 @@ define <vscale x 8 x half> @vfnmsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
}

define <vscale x 8 x half> @vfnmsub_vv_nxv8f16_commuted(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vv_nxv8f16_commuted:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vv_nxv8f16_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v10, v12, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vv_nxv8f16_commuted:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1, v0.t
+; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v20, v12, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20
+; ZVFHMIN-NEXT: ret
%negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
%negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
%v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negb, <vscale x 8 x half> %va, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
@@ -2708,11 +5778,27 @@ define <vscale x 8 x half> @vfnmsub_vv_nxv8f16_commuted(<vscale x 8 x half> %va,
}

define <vscale x 8 x half> @vfnmsub_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vv_nxv8f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v10, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vv_nxv8f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v10, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vv_nxv8f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vxor.vx v12, v12, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v20, v12, v16
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20
+; ZVFHMIN-NEXT: ret
%negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
%negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
%v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negb, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -2720,11 +5806,27 @@ define <vscale x 8 x half> @vfnmsub_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
}

define <vscale x 8 x half> @vfnmsub_vv_nxv8f16_unmasked_commuted(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vv_nxv8f16_unmasked_commuted:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmadd.vv v8, v10, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vv_nxv8f16_unmasked_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v10, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vv_nxv8f16_unmasked_commuted:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v10, v10, a1
+; ZVFHMIN-NEXT: vxor.vx v12, v12, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v20, v12, v16
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20
+; ZVFHMIN-NEXT: ret
%negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
%negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
%v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negb, <vscale x 8 x half> %va, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -2732,11 +5834,29 @@ define <vscale x 8 x half> @vfnmsub_vv_nxv8f16_unmasked_commuted(<vscale x 8 x h
}

define <vscale x 8 x half> @vfnmsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v20, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
@@ -2745,11 +5865,29 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
}

define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_commute(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv8f16_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv8f16_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv8f16_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v12
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v20, v0.t
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
@@ -2758,11 +5896,29 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_commute(<vscale x 8 x half> %va,
}

define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv8f16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv8f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv8f16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v20
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -2771,11 +5927,29 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_unmasked(<vscale x 8 x half> %va,
}

define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_unmasked_commute(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv8f16_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfnmsub.vf v8, fa0, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfnmsub_vf_nxv8f16_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfnmsub.vf v8, fa0, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfnmsub_vf_nxv8f16_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: lui a1, 8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v20
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -2784,11 +5958,29 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_unmasked_commute(<vscale x 8 x ha
}

define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfnmsub_vf_nxv8f16_neg_splat:
-; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv8f16_neg_splat: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv8f16_neg_splat: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v12, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v20, v12, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl) @@ -2797,11 +5989,29 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat(<vscale x 8 x half> %va } define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat_commute(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv8f16_neg_splat_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv8f16_neg_splat_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv8f16_neg_splat_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v12, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v20, v16, v12, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl) @@ -2810,11 +6020,29 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat_commute(<vscale x 8 x h } define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat_unmasked(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv8f16_neg_splat_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv8f16_neg_splat_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: 
vfnmsub_vf_nxv8f16_neg_splat_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v12, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v20, v16, v12 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl) @@ -2823,11 +6051,29 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat_unmasked(<vscale x 8 x } define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute(<vscale x 8 x half> %va, half %b, <vscale x 8 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v12, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v20, v16, v12 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v20 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl) @@ -2838,34 +6084,85 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute(<vscal declare <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half>, <vscale x 16 x i1>, i32) define <vscale x 16 x half> @vfmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vv_nxv16f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfmsub.vv v12, v8, v16, v0.t -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vv_nxv16f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfmsub.vv v12, v8, v16, v0.t +; ZVFH-NEXT: vmv.v.v v8, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vv_nxv16f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vmv4r.v v4, v12 +; ZVFHMIN-NEXT: vmv4r.v v20, v8 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, 
e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl) %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl) ret <vscale x 16 x half> %v } define <vscale x 16 x half> @vfmsub_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vv_nxv16f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfmsub.vv v8, v12, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vv_nxv16f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfmsub.vv v8, v12, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vv_nxv16f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: ret %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl) ret <vscale x 16 x half> %v } define <vscale x 16 x half> @vfmsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vf_nxv16f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vf_nxv16f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfmsub.vf v8, fa0, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vf_nxv16f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vmv4r.v v16, v8 +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v4, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negvc = call <vscale x 16 x half> 
@llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl) @@ -2874,11 +6171,29 @@ define <vscale x 16 x half> @vfmsub_vf_nxv16f16(<vscale x 16 x half> %va, half % } define <vscale x 16 x half> @vfmsub_vf_nxv16f16_commute(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vf_nxv16f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vf_nxv16f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfmsub.vf v8, fa0, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vf_nxv16f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v4, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl) @@ -2887,11 +6202,43 @@ define <vscale x 16 x half> @vfmsub_vf_nxv16f16_commute(<vscale x 16 x half> %va } define <vscale x 16 x half> @vfmsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vf_nxv16f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfmsub.vf v8, fa0, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vf_nxv16f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfmsub.vf v8, fa0, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vf_nxv16f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 2 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 2 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: 
ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -2900,11 +6247,43 @@ define <vscale x 16 x half> @vfmsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %v } define <vscale x 16 x half> @vfmsub_vf_nxv16f16_unmasked_commute(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vf_nxv16f16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfmsub.vf v8, fa0, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vf_nxv16f16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfmsub.vf v8, fa0, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vf_nxv16f16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 2 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 2 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -2913,12 +6292,29 @@ define <vscale x 16 x half> @vfmsub_vf_nxv16f16_unmasked_commute(<vscale x 16 x } define <vscale x 16 x half> @vfnmadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vv_nxv16f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vv v12, v8, v16, v0.t -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vv_nxv16f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vv v12, v8, v16, v0.t +; ZVFH-NEXT: vmv.v.v v8, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vv_nxv16f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vmv4r.v v4, v8 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: 
vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl) %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl) %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negb, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl) @@ -2926,11 +6322,40 @@ define <vscale x 16 x half> @vfnmadd_vv_nxv16f16(<vscale x 16 x half> %va, <vsca } define <vscale x 16 x half> @vfnmadd_vv_nxv16f16_commuted(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vv_nxv16f16_commuted: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vv_nxv16f16_commuted: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v12, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vv_nxv16f16_commuted: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl) %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl) %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negb, <vscale x 16 x half> %va, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl) @@ -2938,11 +6363,27 @@ define <vscale x 16 x half> @vfnmadd_vv_nxv16f16_commuted(<vscale x 16 x half> % } define <vscale x 16 x half> @vfnmadd_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vv_nxv16f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v12, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vv_nxv16f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v12, v16 +; ZVFH-NEXT: ret +; +; 
ZVFHMIN-LABEL: vfnmadd_vv_nxv16f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1 +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl) %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negb, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -2950,11 +6391,27 @@ define <vscale x 16 x half> @vfnmadd_vv_nxv16f16_unmasked(<vscale x 16 x half> % } define <vscale x 16 x half> @vfnmadd_vv_nxv16f16_unmasked_commuted(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vv_nxv16f16_unmasked_commuted: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v12, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vv_nxv16f16_unmasked_commuted: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v12, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vv_nxv16f16_unmasked_commuted: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1 +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl) %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negb, <vscale x 16 x half> %va, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -2962,11 +6419,43 @@ define <vscale x 16 x half> @vfnmadd_vv_nxv16f16_unmasked_commuted(<vscale x 16 } define <vscale x 16 x half> @vfnmadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv16f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv16f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv16f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 
0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v4, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl) @@ -2976,11 +6465,30 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16(<vscale x 16 x half> %va, half } define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_commute(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv16f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv16f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv16f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v4, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl) @@ -2990,11 +6498,44 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_commute(<vscale x 16 x half> %v } define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_unmasked(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv16f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv16f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v12 +; 
ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv16f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 2 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1 +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 2 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -3004,11 +6545,44 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_unmasked(<vscale x 16 x half> % } define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_unmasked_commute(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv16f16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv16f16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv16f16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 2 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1 +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 2 +; ZVFHMIN-NEXT: 
add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -3018,11 +6592,44 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_unmasked_commute(<vscale x 16 x } define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv16f16_neg_splat: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv16f16_neg_splat: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv16f16_neg_splat: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 2 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v4, v16, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl4r.v v4, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 2 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl) @@ -3032,11 +6639,31 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat(<vscale x 16 x half> } define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_commute(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv16f16_neg_splat_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv16f16_neg_splat_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv16f16_neg_splat_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vmv4r.v v4, v8 +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; 
ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl) @@ -3046,11 +6673,30 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_commute(<vscale x 16 } define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_unmasked(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv16f16_neg_splat_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv16f16_neg_splat_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv16f16_neg_splat_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -3060,11 +6706,30 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_unmasked(<vscale x 16 } define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_unmasked_commute(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv16f16_neg_splat_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv16f16_neg_splat_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv16f16_neg_splat_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; 
ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -3074,12 +6739,29 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_unmasked_commute(<vsc } define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vv_nxv16f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vv v12, v8, v16, v0.t -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vv_nxv16f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vv v12, v8, v16, v0.t +; ZVFH-NEXT: vmv.v.v v8, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vv_nxv16f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vmv4r.v v4, v8 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl) %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl) %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negb, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl) @@ -3087,11 +6769,40 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vsca } define <vscale x 16 x half> @vfnmsub_vv_nxv16f16_commuted(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vv_nxv16f16_commuted: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vv_nxv16f16_commuted: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v12, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vv_nxv16f16_commuted: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, 
m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl) %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl) %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negb, <vscale x 16 x half> %va, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl) @@ -3099,11 +6810,27 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16_commuted(<vscale x 16 x half> % } define <vscale x 16 x half> @vfnmsub_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vv_nxv16f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v12, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vv_nxv16f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v12, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vv_nxv16f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1 +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl) %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negb, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -3111,11 +6838,27 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16_unmasked(<vscale x 16 x half> % } define <vscale x 16 x half> @vfnmsub_vv_nxv16f16_unmasked_commuted(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vv_nxv16f16_unmasked_commuted: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v12, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vv_nxv16f16_unmasked_commuted: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v12, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vv_nxv16f16_unmasked_commuted: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v12, v12, a1 +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, 
ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl) %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negb, <vscale x 16 x half> %va, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -3123,11 +6866,30 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16_unmasked_commuted(<vscale x 16 } define <vscale x 16 x half> @vfnmsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv16f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv16f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vmv4r.v v16, v12 +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v4, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl) @@ -3136,11 +6898,29 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16(<vscale x 16 x half> %va, half } define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_commute(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv16f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv16f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v4, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t +; 
ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl) @@ -3149,11 +6929,43 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_commute(<vscale x 16 x half> %v } define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv16f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv16f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 2 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 2 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -3162,11 +6974,43 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_unmasked(<vscale x 16 x half> % } define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_unmasked_commute(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv16f16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv16f16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 2 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 
0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 2 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -3175,11 +7019,30 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_unmasked_commute(<vscale x 16 x } define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv16f16_neg_splat: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv16f16_neg_splat: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16_neg_splat: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vmv4r.v v4, v8 +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl) @@ -3188,11 +7051,31 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat(<vscale x 16 x half> } define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat_commute(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv16f16_neg_splat_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv16f16_neg_splat_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, 
v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16_neg_splat_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vmv4r.v v20, v12 +; ZVFHMIN-NEXT: vmv4r.v v4, v8 +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl) @@ -3201,11 +7084,29 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat_commute(<vscale x 16 } define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat_unmasked(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv16f16_neg_splat_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv16f16_neg_splat_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16_neg_splat_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v16 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -3214,11 +7115,29 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat_unmasked(<vscale x 16 } define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute(<vscale x 16 x half> %va, half %b, <vscale x 16 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; 
ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v16 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl) @@ -3229,36 +7148,391 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute(<vsc declare <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half>, <vscale x 32 x i1>, i32) define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vv_nxv32f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vfmsub.vv v16, v8, v24, v0.t -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vv_nxv32f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vl8re16.v v24, (a0) +; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFH-NEXT: vfmsub.vv v16, v8, v24, v0.t +; ZVFH-NEXT: vmv.v.v v8, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vv_nxv32f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a3, 42 +; ZVFHMIN-NEXT: mul a2, a2, a3 +; ZVFHMIN-NEXT: sub sp, sp, a2 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x2a, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 42 * vlenb +; ZVFHMIN-NEXT: vl8re16.v v24, (a0) +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a2, a0, 5 +; ZVFHMIN-NEXT: add a0, a2, a0 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a0, 8 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs1r.v v0, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v24, a0, v0.t +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a2, a0, 1 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: li a4, 25 +; ZVFHMIN-NEXT: mul a3, a3, a4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a4, a3, 4 +; ZVFHMIN-NEXT: add a3, a4, a3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: mv a3, a1 +; ZVFHMIN-NEXT: bltu a1, a2, .LBB244_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB244_2: +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a4) # 
Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a5, a4, 5 +; ZVFHMIN-NEXT: add a4, a5, a4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl1r.v v0, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a5, a4, 4 +; ZVFHMIN-NEXT: add a4, a5, a4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: li a4, 25 +; ZVFHMIN-NEXT: mul a3, a3, a4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a4, a3, 4 +; ZVFHMIN-NEXT: add a3, a4, a3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: sub a2, a1, a2 +; ZVFHMIN-NEXT: sltu a1, a1, a2 +; ZVFHMIN-NEXT: addi a1, a1, -1 +; ZVFHMIN-NEXT: and a1, a1, a2 +; ZVFHMIN-NEXT: srli a0, a0, 2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 25 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a2, a0, 4 +; ZVFHMIN-NEXT: add a0, a2, a0 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 25 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a1, 42 +; ZVFHMIN-NEXT: mul a0, a0, a1 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl) %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl) ret <vscale x 32 x half> %v } define <vscale x 32 x half> @vfmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, 
<vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vv_nxv32f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vfmsub.vv v8, v16, v24 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vv_nxv32f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vl8re16.v v24, (a0) +; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFH-NEXT: vfmsub.vv v8, v16, v24 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vv_nxv32f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a3, 40 +; ZVFHMIN-NEXT: mul a2, a2, a3 +; ZVFHMIN-NEXT: sub sp, sp, a2 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; ZVFHMIN-NEXT: vl8re16.v v24, (a0) +; ZVFHMIN-NEXT: lui a0, 8 +; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v0, v24, a0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v24 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a0, a2, 1 +; ZVFHMIN-NEXT: sub a3, a1, a0 +; ZVFHMIN-NEXT: sltu a4, a1, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 5 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vmv8r.v v8, v16 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a3, 24 +; ZVFHMIN-NEXT: mul a2, a2, a3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: 
bltu a1, a0, .LBB245_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a1, a0 +; ZVFHMIN-NEXT: .LBB245_2: +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v0, v24 +; ZVFHMIN-NEXT: vmv8r.v v8, v0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a1, 40 +; ZVFHMIN-NEXT: mul a0, a0, a1 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl) ret <vscale x 32 x half> %v } define <vscale x 32 x half> @vfmsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vf_nxv32f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfmsub.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vf_nxv32f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfmsub.vf v8, fa0, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vf_nxv32f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 1 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: mv a3, a0 +; ZVFHMIN-NEXT: bltu a0, a2, .LBB246_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB246_2: +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vmv4r.v v4, v12 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; 
ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: sub a2, a0, a2 +; ZVFHMIN-NEXT: sltu a0, a0, a2 +; ZVFHMIN-NEXT: addi a0, a0, -1 +; ZVFHMIN-NEXT: and a0, a0, a2 +; ZVFHMIN-NEXT: srli a1, a1, 2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl) @@ -3267,11 +7541,124 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16(<vscale x 32 x half> %va, half % } define <vscale x 32 x half> @vfmsub_vf_nxv32f16_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vf_nxv32f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfmsub.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vf_nxv32f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfmsub.vf v8, fa0, v16, 
v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vf_nxv32f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 1 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: mv a3, a0 +; ZVFHMIN-NEXT: bltu a0, a2, .LBB247_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB247_2: +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: vmv4r.v v4, v12 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: sub a2, a0, a2 +; ZVFHMIN-NEXT: sltu a0, a0, a2 +; ZVFHMIN-NEXT: addi a0, a0, -1 +; ZVFHMIN-NEXT: and a0, a0, a2 +; ZVFHMIN-NEXT: srli a1, a1, 2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a1 +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a1, vlenb 
+; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl) @@ -3280,11 +7667,114 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16_commute(<vscale x 32 x half> %va } define <vscale x 32 x half> @vfmsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vf_nxv32f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfmsub.vf v8, fa0, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vf_nxv32f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfmsub.vf v8, fa0, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vf_nxv32f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v7 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; 
ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a0, a1, .LBB248_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB248_2: +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24 +; ZVFHMIN-NEXT: vmv8r.v v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -3293,11 +7783,116 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %v } define <vscale x 32 x half> @vfmsub_vf_nxv32f16_unmasked_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfmsub_vf_nxv32f16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfmsub.vf v8, fa0, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfmsub_vf_nxv32f16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfmsub.vf v8, fa0, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfmsub_vf_nxv32f16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: vmv8r.v v24, v16 +; ZVFHMIN-NEXT: vmv8r.v v16, v8 +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli 
a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v8, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v24, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v7 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a0, a1, .LBB249_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB249_2: +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24 +; ZVFHMIN-NEXT: vmv8r.v v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x 
half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -3306,13 +7901,115 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16_unmasked_commute(<vscale x 32 x } define <vscale x 32 x half> @vfnmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vv_nxv32f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vv v16, v8, v24, v0.t -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vv_nxv32f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vl8re16.v v24, (a0) +; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vv v16, v8, v24, v0.t +; ZVFH-NEXT: vmv.v.v v8, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vv_nxv32f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 5 +; ZVFHMIN-NEXT: sub sp, sp, a2 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: vl8re16.v v24, (a0) +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a0, 8 +; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v16, a0, v0.t +; ZVFHMIN-NEXT: vxor.vx v24, v24, a0, v0.t +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a2, a0, 1 +; ZVFHMIN-NEXT: mv a3, a1 +; ZVFHMIN-NEXT: vmv4r.v v4, v12 +; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a1, a2, .LBB250_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB250_2: +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a3, vlenb +; 
ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: sub a2, a1, a2 +; ZVFHMIN-NEXT: sltu a1, a1, a2 +; ZVFHMIN-NEXT: addi a1, a1, -1 +; ZVFHMIN-NEXT: and a1, a1, a2 +; ZVFHMIN-NEXT: srli a0, a0, 2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl) %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl) %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl) @@ -3320,12 +8017,137 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vsca } define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vv_nxv32f16_commuted: -; CHECK: # %bb.0: -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vv_nxv32f16_commuted: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vl8re16.v v24, (a0) +; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v16, v24, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vv_nxv32f16_commuted: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a3, 40 +; ZVFHMIN-NEXT: mul a2, a2, a3 +; ZVFHMIN-NEXT: sub sp, sp, a2 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; ZVFHMIN-NEXT: vl8re16.v v24, (a0) +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a0, 8 +; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v16, a0, v0.t +; ZVFHMIN-NEXT: vxor.vx v16, v24, a0, v0.t +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a2, a0, 1 +; ZVFHMIN-NEXT: mv a3, a1 +; ZVFHMIN-NEXT: vmv4r.v v4, v12 +; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli 
a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a1, a2, .LBB251_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB251_2: +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a3, a3, a4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: sub a2, a1, a2 +; ZVFHMIN-NEXT: sltu a1, a1, a2 +; ZVFHMIN-NEXT: addi a1, a1, -1 +; ZVFHMIN-NEXT: and a1, a1, a2 +; ZVFHMIN-NEXT: srli a0, a0, 2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24 +; ZVFHMIN-NEXT: csrr 
a0, vlenb +; ZVFHMIN-NEXT: li a1, 40 +; ZVFHMIN-NEXT: mul a0, a0, a1 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl) %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl) %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl) @@ -3333,12 +8155,115 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_commuted(<vscale x 32 x half> % } define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vv_nxv32f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v16, v24 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vv_nxv32f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vl8re16.v v24, (a0) +; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v16, v24 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vv_nxv32f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 5 +; ZVFHMIN-NEXT: sub sp, sp, a2 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a3, 24 +; ZVFHMIN-NEXT: mul a2, a2, a3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vl8re16.v v24, (a0) +; ZVFHMIN-NEXT: lui a0, 8 +; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a0 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v7 +; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v24, v24, a0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a0, a2, 1 +; ZVFHMIN-NEXT: sub a3, a1, a0 +; ZVFHMIN-NEXT: sltu a4, a1, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, 
e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v28, v8 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a1, a0, .LBB252_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a1, a0 +; ZVFHMIN-NEXT: .LBB252_2: +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v0, v8, v16 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v0 +; ZVFHMIN-NEXT: vmv8r.v v8, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl) %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -3346,12 +8271,115 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked(<vscale x 32 x half> % } define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted: -; CHECK: # %bb.0: -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v16, v24 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vl8re16.v v24, (a0) +; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v16, v24 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 5 +; ZVFHMIN-NEXT: sub sp, sp, a2 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a3, 24 +; ZVFHMIN-NEXT: mul a2, a2, a3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vl8re16.v v24, (a0) +; ZVFHMIN-NEXT: lui a0, 8 +; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v16, a0 +; ZVFHMIN-NEXT: csrr a2, 
vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v7 +; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v24, a0 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a0, a2, 1 +; ZVFHMIN-NEXT: sub a3, a1, a0 +; ZVFHMIN-NEXT: sltu a4, a1, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v28, v8 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: bltu a1, a0, .LBB253_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a1, a0 +; ZVFHMIN-NEXT: .LBB253_2: +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v0, v16 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v8 +; ZVFHMIN-NEXT: vmv8r.v v8, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl) %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 
32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -3359,11 +8387,138 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked_commuted(<vscale x 32 } define <vscale x 32 x half> @vfnmadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv32f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv32f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 40 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 1 +; ZVFHMIN-NEXT: mv a3, a0 +; ZVFHMIN-NEXT: addi a4, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vmv4r.v v4, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a0, a2, .LBB254_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB254_2: +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: csrr a3, vlenb +; 
ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a3, a3, a4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: sub a2, a0, a2 +; ZVFHMIN-NEXT: sltu a0, a0, a2 +; ZVFHMIN-NEXT: addi a0, a0, -1 +; ZVFHMIN-NEXT: and a0, a0, a2 +; ZVFHMIN-NEXT: srli a1, a1, 2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a1, 40 +; ZVFHMIN-NEXT: mul a0, a0, a1 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl) @@ -3373,11 +8528,125 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16(<vscale x 32 x half> %va, half } define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv32f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv32f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, 
(a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 1 +; ZVFHMIN-NEXT: mv a3, a0 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vmv4r.v v4, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: bltu a0, a2, .LBB255_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB255_2: +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4 +; ZVFHMIN-NEXT: sub a2, a0, a2 +; ZVFHMIN-NEXT: sltu a0, a0, a2 +; ZVFHMIN-NEXT: addi a0, a0, -1 +; ZVFHMIN-NEXT: and a0, a0, a2 +; ZVFHMIN-NEXT: srli a1, a1, 2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: vfncvt.f.f.w 
v12, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl) @@ -3387,11 +8656,116 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_commute(<vscale x 32 x half> %v } define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv32f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv32f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v7 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, 
e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v28, v8 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: bltu a0, a1, .LBB256_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB256_2: +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v8 +; ZVFHMIN-NEXT: vmv8r.v v8, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -3401,11 +8775,117 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked(<vscale x 32 x half> % } define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv32f16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv32f16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v24, v8, a1 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, m4, ta, ma +; 
ZVFHMIN-NEXT: vmset.m v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v24, v8, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4 +; ZVFHMIN-NEXT: vmv1r.v v0, v24 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v28, v8 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: bltu a0, a1, .LBB257_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB257_2: +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v16 +; ZVFHMIN-NEXT: vmv8r.v v8, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -3415,11 +8895,119 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked_commute(<vscale x 32 x } define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 
x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv32f16_neg_splat: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v24, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 1 +; ZVFHMIN-NEXT: mv a3, a0 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vmv4r.v v4, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: bltu a0, a2, .LBB258_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB258_2: +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFHMIN-NEXT: addi a4, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: addi a4, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4 +; ZVFHMIN-NEXT: sub a2, a0, a2 +; ZVFHMIN-NEXT: sltu a0, a0, a2 +; ZVFHMIN-NEXT: addi a0, a0, -1 +; ZVFHMIN-NEXT: and a0, a0, a2 +; ZVFHMIN-NEXT: srli a1, a1, 2 +; ZVFHMIN-NEXT: vsetvli a2, 
zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl) @@ -3429,11 +9017,125 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half> } define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv32f16_neg_splat_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v24, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 1 +; ZVFHMIN-NEXT: mv a3, a0 +; ZVFHMIN-NEXT: addi a4, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vmv4r.v v4, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; 
ZVFHMIN-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a0, a2, .LBB259_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB259_2: +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4 +; ZVFHMIN-NEXT: sub a2, a0, a2 +; ZVFHMIN-NEXT: sltu a0, a0, a2 +; ZVFHMIN-NEXT: addi a0, a0, -1 +; ZVFHMIN-NEXT: and a0, a0, a2 +; ZVFHMIN-NEXT: srli a1, a1, 2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl) @@ -3443,11 +9145,117 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32 } define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vf 
v8, fa0, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v24, v24, a1 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v24, v8, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4 +; ZVFHMIN-NEXT: vmv1r.v v0, v24 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v28, v8 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: bltu a0, a1, .LBB260_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB260_2: +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; 
ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v16 +; ZVFHMIN-NEXT: vmv8r.v v8, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -3457,11 +9265,116 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked(<vscale x 32 } define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v24, a1 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v7 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; 
ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v16, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v28, v8 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: bltu a0, a1, .LBB261_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB261_2: +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v8 +; ZVFHMIN-NEXT: vmv8r.v v8, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -3471,13 +9384,115 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vsc } define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vv_nxv32f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vv v16, v8, v24, v0.t -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vv_nxv32f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vl8re16.v v24, (a0) +; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vv v16, v8, v24, v0.t +; ZVFH-NEXT: vmv.v.v v8, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vv_nxv32f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 5 +; ZVFHMIN-NEXT: sub sp, sp, a2 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 
0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: vl8re16.v v24, (a0) +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a0, 8 +; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v16, a0, v0.t +; ZVFHMIN-NEXT: vxor.vx v24, v24, a0, v0.t +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a2, a0, 1 +; ZVFHMIN-NEXT: mv a3, a1 +; ZVFHMIN-NEXT: vmv4r.v v4, v12 +; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a1, a2, .LBB262_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB262_2: +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: sub a2, a1, a2 +; ZVFHMIN-NEXT: sltu a1, a1, a2 +; ZVFHMIN-NEXT: addi a1, a1, -1 +; ZVFHMIN-NEXT: and a1, a1, a2 +; ZVFHMIN-NEXT: srli a0, a0, 2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 
+; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl) %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl) %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl) @@ -3485,12 +9500,137 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca } define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vv_nxv32f16_commuted: -; CHECK: # %bb.0: -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vv_nxv32f16_commuted: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vl8re16.v v24, (a0) +; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v16, v24, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vv_nxv32f16_commuted: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a3, 40 +; ZVFHMIN-NEXT: mul a2, a2, a3 +; ZVFHMIN-NEXT: sub sp, sp, a2 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; ZVFHMIN-NEXT: vl8re16.v v24, (a0) +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a0, 8 +; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v16, a0, v0.t +; ZVFHMIN-NEXT: vxor.vx v16, v24, a0, v0.t +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a2, a0, 1 +; ZVFHMIN-NEXT: mv a3, a1 +; ZVFHMIN-NEXT: vmv4r.v v4, v12 +; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a1, a2, .LBB263_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB263_2: +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; 
ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a3, a3, a4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: sub a2, a1, a2 +; ZVFHMIN-NEXT: sltu a1, a1, a2 +; ZVFHMIN-NEXT: addi a1, a1, -1 +; ZVFHMIN-NEXT: and a1, a1, a2 +; ZVFHMIN-NEXT: srli a0, a0, 2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a1, 40 +; ZVFHMIN-NEXT: mul a0, a0, a1 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl) %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl) %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl) @@ -3498,12 +9638,115 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_commuted(<vscale x 32 x half> % } define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vv_nxv32f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v16, v24 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vv_nxv32f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vl8re16.v v24, (a0) +; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v16, v24 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vv_nxv32f16_unmasked: +; ZVFHMIN: 
# %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 5 +; ZVFHMIN-NEXT: sub sp, sp, a2 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a3, 24 +; ZVFHMIN-NEXT: mul a2, a2, a3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vl8re16.v v24, (a0) +; ZVFHMIN-NEXT: lui a0, 8 +; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v16, a0 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v7 +; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v24, v24, a0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a0, a2, 1 +; ZVFHMIN-NEXT: sub a3, a1, a0 +; ZVFHMIN-NEXT: sltu a4, a1, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v28, v8 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a1, a0, .LBB264_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a1, a0 +; ZVFHMIN-NEXT: .LBB264_2: +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; 
ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v0, v8, v16 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v0 +; ZVFHMIN-NEXT: vmv8r.v v8, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl) %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -3511,12 +9754,115 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> % } define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted: -; CHECK: # %bb.0: -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vfnmadd.vv v8, v16, v24 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vl8re16.v v24, (a0) +; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFH-NEXT: vfnmadd.vv v8, v16, v24 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 5 +; ZVFHMIN-NEXT: sub sp, sp, a2 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a3, 24 +; ZVFHMIN-NEXT: mul a2, a2, a3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vl8re16.v v24, (a0) +; ZVFHMIN-NEXT: lui a0, 8 +; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v16, a0 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v7 +; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v24, a0 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a0, a2, 1 +; ZVFHMIN-NEXT: sub a3, a1, a0 +; ZVFHMIN-NEXT: sltu a4, a1, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size 
Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v28, v8 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: bltu a1, a0, .LBB265_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a1, a0 +; ZVFHMIN-NEXT: .LBB265_2: +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a0, a0, a2 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v0, v16 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v8 +; ZVFHMIN-NEXT: vmv8r.v v8, v24 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl) %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl) %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -3524,11 +9870,125 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32 } define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv32f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv32f16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv32f16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 40 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: add a1, sp, 
a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 1 +; ZVFHMIN-NEXT: vmv4r.v v4, v12 +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: mv a3, a0 +; ZVFHMIN-NEXT: bltu a0, a2, .LBB266_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB266_2: +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 4 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: sub a2, a0, a2 +; ZVFHMIN-NEXT: sltu a0, a0, a2 +; ZVFHMIN-NEXT: addi a0, a0, -1 +; ZVFHMIN-NEXT: and a0, a0, a2 +; ZVFHMIN-NEXT: srli a1, a1, 2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; 
ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a1, 40 +; ZVFHMIN-NEXT: mul a0, a0, a1 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl) @@ -3537,11 +9997,119 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, half } define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv32f16_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv32f16_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv32f16_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 1 +; ZVFHMIN-NEXT: vmv4r.v v4, v12 +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFHMIN-NEXT: mv a3, a0 +; ZVFHMIN-NEXT: bltu a0, a2, .LBB267_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB267_2: +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill 
+; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4 +; ZVFHMIN-NEXT: sub a2, a0, a2 +; ZVFHMIN-NEXT: sltu a0, a0, a2 +; ZVFHMIN-NEXT: addi a0, a0, -1 +; ZVFHMIN-NEXT: and a0, a0, a2 +; ZVFHMIN-NEXT: srli a1, a1, 2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl) @@ -3550,11 +10118,115 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_commute(<vscale x 32 x half> %v } define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv32f16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv32f16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv32f16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; 
ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v7 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vmv4r.v v8, v16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a0, a1, .LBB268_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB268_2: +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v0, v8 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24 +; ZVFHMIN-NEXT: vmv8r.v v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -3563,11 +10235,114 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_unmasked(<vscale x 32 x half> % } define <vscale x 32 x half> 
@vfnmsub_vf_nxv32f16_unmasked_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv32f16_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv32f16_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv32f16_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v7 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vmv4r.v v8, v16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16 +; ZVFHMIN-NEXT: bltu a0, a1, .LBB269_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB269_2: +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add 
a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v16 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -3576,11 +10351,119 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_unmasked_commute(<vscale x 32 x } define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv32f16_neg_splat: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv32f16_neg_splat: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv32f16_neg_splat: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vmv8r.v v16, v8 +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v8, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs1r.v v0, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 1 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFHMIN-NEXT: mv a3, a0 +; ZVFHMIN-NEXT: bltu a0, a2, .LBB270_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB270_2: +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 24 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: 
vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: addi a4, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v0, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vmv8r.v v0, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl1r.v v0, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: addi a4, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: sub a2, a0, a2 +; ZVFHMIN-NEXT: sltu a0, a0, a2 +; ZVFHMIN-NEXT: addi a0, a0, -1 +; ZVFHMIN-NEXT: and a0, a0, a2 +; ZVFHMIN-NEXT: srli a1, a1, 2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl) @@ -3589,11 +10472,139 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat(<vscale x 32 x half> } define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv32f16_neg_splat_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv32f16_neg_splat_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv32f16_neg_splat_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: 
.cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 34 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x22, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 34 * vlenb +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 25 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs1r.v v0, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v16, v24, a1, v0.t +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 1 +; ZVFHMIN-NEXT: vmv4r.v v28, v20 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a4, a3, 4 +; ZVFHMIN-NEXT: add a3, a4, a3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a4, a3, 3 +; ZVFHMIN-NEXT: add a3, a4, a3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: mv a3, a0 +; ZVFHMIN-NEXT: bltu a0, a2, .LBB271_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a3, a2 +; ZVFHMIN-NEXT: .LBB271_2: +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: li a5, 25 +; ZVFHMIN-NEXT: mul a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24 +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vmv8r.v v0, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0 +; ZVFHMIN-NEXT: addi a4, sp, 16 +; ZVFHMIN-NEXT: vl1r.v v0, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a5, a4, 3 +; ZVFHMIN-NEXT: add a4, a5, a4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a4, a3, 4 +; ZVFHMIN-NEXT: add a3, a4, a3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a4, a3, 3 +; ZVFHMIN-NEXT: add a3, a4, a3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: sub a2, a0, a2 +; ZVFHMIN-NEXT: sltu a0, a0, a2 +; ZVFHMIN-NEXT: addi a0, a0, -1 +; ZVFHMIN-NEXT: and a0, a0, a2 +; ZVFHMIN-NEXT: srli a1, a1, 2 +; 
ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 25 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a2, a1, 3 +; ZVFHMIN-NEXT: add a1, a2, a1 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: li a1, 34 +; ZVFHMIN-NEXT: mul a0, a0, a1 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl) @@ -3602,11 +10613,115 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_commute(<vscale x 32 } define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_unmasked(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv32f16_neg_splat_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv32f16_neg_splat_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfnmsub_vf_nxv32f16_neg_splat_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: vmv8r.v v24, v16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v8, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v8, a1 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v7 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, 
ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFHMIN-NEXT: vmv8r.v v8, v24 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16 +; ZVFHMIN-NEXT: bltu a0, a1, .LBB272_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB272_2: +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v16 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -3615,11 +10730,115 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_unmasked(<vscale x 32 } define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) { -; CHECK-LABEL: vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vfnmsub.vf v8, fa0, v16 +; ZVFH-NEXT: ret +; +; 
ZVFHMIN-LABEL: vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: lui a1, 8 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFHMIN-NEXT: vxor.vx v8, v24, a1 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v7 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: sltu a4, a0, a3 +; ZVFHMIN-NEXT: addi a4, a4, -1 +; ZVFHMIN-NEXT: and a3, a4, a3 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2 +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vmv4r.v v8, v16 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: li a4, 24 +; ZVFHMIN-NEXT: mul a2, a2, a4 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; ZVFHMIN-NEXT: bltu a0, a1, .LBB273_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB273_2: +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: add a1, sp, a1 +; 
ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v24, v0, v8 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24 +; ZVFHMIN-NEXT: vmv8r.v v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: ret %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0 %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl) @@ -7167,12 +14386,24 @@ define <vscale x 8 x double> @vfnmsub_vf_nxv8f64_neg_splat_unmasked_commute(<vsc } define <vscale x 1 x half> @vfma_vv_nxv1f16_double_neg(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vfma_vv_nxv1f16_double_neg: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfma_vv_nxv1f16_double_neg: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vfmadd.vv v9, v8, v10, v0.t +; ZVFH-NEXT: vmv1r.v v8, v9 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfma_vv_nxv1f16_double_neg: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v10 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9 +; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v12, v10, v11, v0.t +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %m, i32 %evl) %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl) %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %negb, <vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir index 36a9244..73af03b 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir @@ -60,12 +60,12 @@ registers: - { id: 0, class: vecr } - { id: 1, class: vecr } # AVX: %0:vr256 = COPY $ymm1 -# AVX-NEXT: %1:vr128 = VEXTRACTF128rr %0, 1 +# AVX-NEXT: %1:vr128 = VEXTRACTF128rri %0, 1 # AVX-NEXT: $xmm0 = COPY %1 # AVX-NEXT: RET 0, implicit $xmm0 # # AVX512VL: %0:vr256x = COPY $ymm1 -# AVX512VL-NEXT: %1:vr128x = VEXTRACTF32x4Z256rr %0, 1 +# AVX512VL-NEXT: %1:vr128x = VEXTRACTF32x4Z256rri %0, 1 # AVX512VL-NEXT: $xmm0 = COPY %1 # AVX512VL-NEXT: RET 0, implicit $xmm0 body: | diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir index f0491b6..5ddf58e 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir @@ -59,7 +59,7 @@ registers: - { id: 0, class: vecr } - { id: 1, class: vecr } # ALL: %0:vr512 = COPY $zmm1 -# ALL-NEXT: %1:vr128x = VEXTRACTF32x4Zrr %0, 1 +# ALL-NEXT: %1:vr128x = 
VEXTRACTF32x4Zrri %0, 1 # ALL-NEXT: $xmm0 = COPY %1 # ALL-NEXT: RET 0, implicit $xmm0 body: | @@ -111,7 +111,7 @@ registers: - { id: 0, class: vecr } - { id: 1, class: vecr } # ALL: %0:vr512 = COPY $zmm1 -# ALL-NEXT: %1:vr256x = VEXTRACTF64x4Zrr %0, 1 +# ALL-NEXT: %1:vr256x = VEXTRACTF64x4Zrri %0, 1 # ALL-NEXT: $ymm0 = COPY %1 # ALL-NEXT: RET 0, implicit $ymm0 body: | diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir index 9424e1d..f04917c 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir @@ -30,13 +30,13 @@ registers: - { id: 2, class: vecr } # AVX: %0:vr256 = COPY $ymm0 # AVX-NEXT: %1:vr128 = COPY $xmm1 -# AVX-NEXT: %2:vr256 = VINSERTF128rr %0, %1, 0 +# AVX-NEXT: %2:vr256 = VINSERTF128rri %0, %1, 0 # AVX-NEXT: $ymm0 = COPY %2 # AVX-NEXT: RET 0, implicit $ymm0 # # AVX512VL: %0:vr256x = COPY $ymm0 # AVX512VL-NEXT: %1:vr128x = COPY $xmm1 -# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rr %0, %1, 0 +# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rri %0, %1, 0 # AVX512VL-NEXT: $ymm0 = COPY %2 # AVX512VL-NEXT: RET 0, implicit $ymm0 body: | @@ -92,13 +92,13 @@ registers: - { id: 2, class: vecr } # AVX: %0:vr256 = COPY $ymm0 # AVX-NEXT: %1:vr128 = COPY $xmm1 -# AVX-NEXT: %2:vr256 = VINSERTF128rr %0, %1, 1 +# AVX-NEXT: %2:vr256 = VINSERTF128rri %0, %1, 1 # AVX-NEXT: $ymm0 = COPY %2 # AVX-NEXT: RET 0, implicit $ymm0 # # AVX512VL: %0:vr256x = COPY $ymm0 # AVX512VL-NEXT: %1:vr128x = COPY $xmm1 -# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rr %0, %1, 1 +# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rri %0, %1, 1 # AVX512VL-NEXT: $ymm0 = COPY %2 # AVX512VL-NEXT: RET 0, implicit $ymm0 body: | @@ -123,13 +123,13 @@ registers: - { id: 2, class: vecr } # AVX: %0:vr256 = IMPLICIT_DEF # AVX-NEXT: %1:vr128 = COPY $xmm1 -# AVX-NEXT: %2:vr256 = VINSERTF128rr %0, %1, 1 +# AVX-NEXT: %2:vr256 = VINSERTF128rri %0, %1, 1 # AVX-NEXT: $ymm0 = COPY %2 # AVX-NEXT: RET 0, implicit $ymm0 # # AVX512VL: %0:vr256x = IMPLICIT_DEF # AVX512VL-NEXT: %1:vr128x = COPY $xmm1 -# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rr %0, %1, 1 +# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rri %0, %1, 1 # AVX512VL-NEXT: $ymm0 = COPY %2 # AVX512VL-NEXT: RET 0, implicit $ymm0 body: | diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir index fefce0b..10d98d7 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir @@ -51,8 +51,8 @@ body: | ; ALL-LABEL: name: test_insert_128_idx0 ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1 - ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[COPY]], [[COPY1]], 0 - ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]] + ; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[COPY]], [[COPY1]], 0 + ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri]] ; ALL: RET 0, implicit $ymm0 %0(<16 x s32>) = COPY $zmm0 %1(<4 x s32>) = COPY $xmm1 @@ -102,8 +102,8 @@ body: | ; ALL-LABEL: name: test_insert_128_idx1 ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1 - ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[COPY]], [[COPY1]], 1 - ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]] + ; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[COPY]], [[COPY1]], 1 + ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri]] ; ALL: RET 0, implicit $ymm0 
%0(<16 x s32>) = COPY $zmm0 %1(<4 x s32>) = COPY $xmm1 @@ -127,8 +127,8 @@ body: | ; ALL-LABEL: name: test_insert_128_idx1_undef ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1 - ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[DEF]], [[COPY]], 1 - ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]] + ; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[DEF]], [[COPY]], 1 + ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri]] ; ALL: RET 0, implicit $ymm0 %0(<16 x s32>) = IMPLICIT_DEF %1(<4 x s32>) = COPY $xmm1 @@ -152,8 +152,8 @@ body: | ; ALL-LABEL: name: test_insert_256_idx0 ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1 - ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[COPY]], [[COPY1]], 0 - ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]] + ; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri [[COPY]], [[COPY1]], 0 + ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]] ; ALL: RET 0, implicit $ymm0 %0(<16 x s32>) = COPY $zmm0 %1(<8 x s32>) = COPY $ymm1 @@ -203,8 +203,8 @@ body: | ; ALL-LABEL: name: test_insert_256_idx1 ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1 - ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[COPY]], [[COPY1]], 1 - ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]] + ; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri [[COPY]], [[COPY1]], 1 + ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]] ; ALL: RET 0, implicit $ymm0 %0(<16 x s32>) = COPY $zmm0 %1(<8 x s32>) = COPY $ymm1 @@ -228,8 +228,8 @@ body: | ; ALL-LABEL: name: test_insert_256_idx1_undef ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1 - ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[DEF]], [[COPY]], 1 - ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]] + ; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri [[DEF]], [[COPY]], 1 + ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]] ; ALL: RET 0, implicit $ymm0 %0(<16 x s32>) = IMPLICIT_DEF %1(<8 x s32>) = COPY $ymm1 diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir index 8c04cc6..9d6494d62 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir @@ -23,14 +23,14 @@ body: | ; AVX-LABEL: name: test_merge ; AVX: [[DEF:%[0-9]+]]:vr128 = IMPLICIT_DEF ; AVX: undef %2.sub_xmm:vr256 = COPY [[DEF]] - ; AVX: [[VINSERTF128rr:%[0-9]+]]:vr256 = VINSERTF128rr %2, [[DEF]], 1 - ; AVX: $ymm0 = COPY [[VINSERTF128rr]] + ; AVX: [[VINSERTF128rri:%[0-9]+]]:vr256 = VINSERTF128rri %2, [[DEF]], 1 + ; AVX: $ymm0 = COPY [[VINSERTF128rri]] ; AVX: RET 0, implicit $ymm0 ; AVX512VL-LABEL: name: test_merge ; AVX512VL: [[DEF:%[0-9]+]]:vr128x = IMPLICIT_DEF ; AVX512VL: undef %2.sub_xmm:vr256x = COPY [[DEF]] - ; AVX512VL: [[VINSERTF32x4Z256rr:%[0-9]+]]:vr256x = VINSERTF32x4Z256rr %2, [[DEF]], 1 - ; AVX512VL: $ymm0 = COPY [[VINSERTF32x4Z256rr]] + ; AVX512VL: [[VINSERTF32x4Z256rri:%[0-9]+]]:vr256x = VINSERTF32x4Z256rri %2, [[DEF]], 1 + ; AVX512VL: $ymm0 = COPY [[VINSERTF32x4Z256rri]] ; AVX512VL: RET 0, implicit $ymm0 %0(<4 x s32>) = IMPLICIT_DEF %1(<8 x s32>) = G_CONCAT_VECTORS %0(<4 x s32>), %0(<4 x s32>) diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir index 3c003d6..22045d3 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir +++ 
b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir @@ -24,10 +24,10 @@ body: | ; ALL-LABEL: name: test_merge_v128 ; ALL: [[DEF:%[0-9]+]]:vr128x = IMPLICIT_DEF ; ALL: undef %2.sub_xmm:vr512 = COPY [[DEF]] - ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr %2, [[DEF]], 1 - ; ALL: [[VINSERTF32x4Zrr1:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[VINSERTF32x4Zrr]], [[DEF]], 2 - ; ALL: [[VINSERTF32x4Zrr2:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[VINSERTF32x4Zrr1]], [[DEF]], 3 - ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr2]] + ; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri %2, [[DEF]], 1 + ; ALL: [[VINSERTF32x4Zrri1:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[VINSERTF32x4Zrri]], [[DEF]], 2 + ; ALL: [[VINSERTF32x4Zrri2:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[VINSERTF32x4Zrri1]], [[DEF]], 3 + ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri2]] ; ALL: RET 0, implicit $zmm0 %0(<4 x s32>) = IMPLICIT_DEF %1(<16 x s32>) = G_CONCAT_VECTORS %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>) @@ -49,8 +49,8 @@ body: | ; ALL-LABEL: name: test_merge_v256 ; ALL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF ; ALL: undef %2.sub_ymm:vr512 = COPY [[DEF]] - ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr %2, [[DEF]], 1 - ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]] + ; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri %2, [[DEF]], 1 + ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]] ; ALL: RET 0, implicit $zmm0 %0(<8 x s32>) = IMPLICIT_DEF %1(<16 x s32>) = G_CONCAT_VECTORS %0(<8 x s32>), %0(<8 x s32>) diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir index 3947192..5ed1463 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir @@ -24,18 +24,19 @@ body: | ; AVX-LABEL: name: test_unmerge ; AVX: [[DEF:%[0-9]+]]:vr256 = IMPLICIT_DEF - ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY [[DEF]].sub_xmm - ; AVX: [[VEXTRACTF128rr:%[0-9]+]]:vr128 = VEXTRACTF128rr [[DEF]], 1 - ; AVX: $xmm0 = COPY [[COPY]] - ; AVX: $xmm1 = COPY [[VEXTRACTF128rr]] - ; AVX: RET 0, implicit $xmm0, implicit $xmm1 + ; AVX-NEXT: [[COPY:%[0-9]+]]:vr128 = COPY [[DEF]].sub_xmm + ; AVX-NEXT: [[VEXTRACTF128rri:%[0-9]+]]:vr128 = VEXTRACTF128rri [[DEF]], 1 + ; AVX-NEXT: $xmm0 = COPY [[COPY]] + ; AVX-NEXT: $xmm1 = COPY [[VEXTRACTF128rri]] + ; AVX-NEXT: RET 0, implicit $xmm0, implicit $xmm1 + ; ; AVX512VL-LABEL: name: test_unmerge ; AVX512VL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF - ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY [[DEF]].sub_xmm - ; AVX512VL: [[VEXTRACTF32x4Z256rr:%[0-9]+]]:vr128x = VEXTRACTF32x4Z256rr [[DEF]], 1 - ; AVX512VL: $xmm0 = COPY [[COPY]] - ; AVX512VL: $xmm1 = COPY [[VEXTRACTF32x4Z256rr]] - ; AVX512VL: RET 0, implicit $xmm0, implicit $xmm1 + ; AVX512VL-NEXT: [[COPY:%[0-9]+]]:vr128x = COPY [[DEF]].sub_xmm + ; AVX512VL-NEXT: [[VEXTRACTF32x4Z256rri:%[0-9]+]]:vr128x = VEXTRACTF32x4Z256rri [[DEF]], 1 + ; AVX512VL-NEXT: $xmm0 = COPY [[COPY]] + ; AVX512VL-NEXT: $xmm1 = COPY [[VEXTRACTF32x4Z256rri]] + ; AVX512VL-NEXT: RET 0, implicit $xmm0, implicit $xmm1 %0(<8 x s32>) = IMPLICIT_DEF %1(<4 x s32>), %2(<4 x s32>) = G_UNMERGE_VALUES %0(<8 x s32>) $xmm0 = COPY %1(<4 x s32>) diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir index 17730f9..8864d5b 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir @@ -27,9 +27,9 @@ body: | ; ALL-LABEL: 
name: test_unmerge_v128 ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY [[DEF]].sub_xmm - ; ALL: [[VEXTRACTF32x4Zrr:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 1 - ; ALL: [[VEXTRACTF32x4Zrr1:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 2 - ; ALL: [[VEXTRACTF32x4Zrr2:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 3 + ; ALL: [[VEXTRACTF32x4Zrri:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrri [[DEF]], 1 + ; ALL: [[VEXTRACTF32x4Zrri1:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrri [[DEF]], 2 + ; ALL: [[VEXTRACTF32x4Zrri2:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrri [[DEF]], 3 ; ALL: $xmm0 = COPY [[COPY]] ; ALL: RET 0, implicit $xmm0 %0(<16 x s32>) = IMPLICIT_DEF @@ -53,7 +53,7 @@ body: | ; ALL-LABEL: name: test_unmerge_v256 ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY [[DEF]].sub_ymm - ; ALL: [[VEXTRACTF64x4Zrr:%[0-9]+]]:vr256x = VEXTRACTF64x4Zrr [[DEF]], 1 + ; ALL: [[VEXTRACTF64x4Zrri:%[0-9]+]]:vr256x = VEXTRACTF64x4Zrri [[DEF]], 1 ; ALL: $ymm0 = COPY [[COPY]] ; ALL: RET 0, implicit $ymm0 %0(<16 x s32>) = IMPLICIT_DEF diff --git a/llvm/test/CodeGen/X86/cmp-shiftX-maskX.ll b/llvm/test/CodeGen/X86/cmp-shiftX-maskX.ll index 67070b9..227de9a 100644 --- a/llvm/test/CodeGen/X86/cmp-shiftX-maskX.ll +++ b/llvm/test/CodeGen/X86/cmp-shiftX-maskX.ll @@ -994,6 +994,30 @@ define i1 @shr_to_rotate_eq_i32_s5(i32 %x) { ret i1 %r } +define i32 @issue108722(i32 %0) { +; CHECK-NOBMI-LABEL: issue108722: +; CHECK-NOBMI: # %bb.0: +; CHECK-NOBMI-NEXT: movl %edi, %ecx +; CHECK-NOBMI-NEXT: roll $24, %ecx +; CHECK-NOBMI-NEXT: xorl %eax, %eax +; CHECK-NOBMI-NEXT: cmpl %edi, %ecx +; CHECK-NOBMI-NEXT: sete %al +; CHECK-NOBMI-NEXT: retq +; +; CHECK-BMI2-LABEL: issue108722: +; CHECK-BMI2: # %bb.0: +; CHECK-BMI2-NEXT: rorxl $8, %edi, %ecx +; CHECK-BMI2-NEXT: xorl %eax, %eax +; CHECK-BMI2-NEXT: cmpl %edi, %ecx +; CHECK-BMI2-NEXT: sete %al +; CHECK-BMI2-NEXT: retq + %2 = tail call i32 @llvm.fshl.i32(i32 %0, i32 %0, i32 24) + %3 = icmp eq i32 %2, %0 + %4 = zext i1 %3 to i32 + ret i32 %4 +} + + ;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: ; CHECK-AVX: {{.*}} ; CHECK-NOBMI-SSE2: {{.*}} diff --git a/llvm/test/CodeGen/X86/ctlo.ll b/llvm/test/CodeGen/X86/ctlo.ll index f383c9a..2f4fef8 100644 --- a/llvm/test/CodeGen/X86/ctlo.ll +++ b/llvm/test/CodeGen/X86/ctlo.ll @@ -54,20 +54,18 @@ define i8 @ctlo_i8(i8 %x) { ; ; X86-CLZ-LABEL: ctlo_i8: ; X86-CLZ: # %bb.0: -; X86-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; X86-CLZ-NEXT: notb %al -; X86-CLZ-NEXT: movzbl %al, %eax +; X86-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-CLZ-NEXT: shll $24, %eax +; X86-CLZ-NEXT: notl %eax ; X86-CLZ-NEXT: lzcntl %eax, %eax -; X86-CLZ-NEXT: addl $-24, %eax ; X86-CLZ-NEXT: # kill: def $al killed $al killed $eax ; X86-CLZ-NEXT: retl ; ; X64-CLZ-LABEL: ctlo_i8: ; X64-CLZ: # %bb.0: -; X64-CLZ-NEXT: notb %dil -; X64-CLZ-NEXT: movzbl %dil, %eax -; X64-CLZ-NEXT: lzcntl %eax, %eax -; X64-CLZ-NEXT: addl $-24, %eax +; X64-CLZ-NEXT: shll $24, %edi +; X64-CLZ-NEXT: notl %edi +; X64-CLZ-NEXT: lzcntl %edi, %eax ; X64-CLZ-NEXT: # kill: def $al killed $al killed $eax ; X64-CLZ-NEXT: retq %tmp1 = xor i8 %x, -1 diff --git a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir index 13c9585..2f587d7 100644 --- a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir +++ b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir @@ -877,21 +877,21 @@ body: | $ymm0 = VRNDSCALEPSZ256rmi $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr ; CHECK: $ymm0 = VROUNDPSYri $ymm0, 15, implicit $mxcsr $ymm0 = VRNDSCALEPSZ256rri $ymm0, 15, implicit $mxcsr - ; CHECK: $ymm0 = VPERM2F128rm $ymm0, $rip, 1, $noreg, 0, $noreg, 32 + ; CHECK: $ymm0 = VPERM2F128rmi $ymm0, $rip, 1, $noreg, 0, $noreg, 32 $ymm0 = VSHUFF32X4Z256rmi $ymm0, $rip, 1, $noreg, 0, $noreg, 228 - ; CHECK: $ymm0 = VPERM2F128rr $ymm0, $ymm1, 32 + ; CHECK: $ymm0 = VPERM2F128rri $ymm0, $ymm1, 32 $ymm0 = VSHUFF32X4Z256rri $ymm0, $ymm1, 228 - ; CHECK: $ymm0 = VPERM2F128rm $ymm0, $rip, 1, $noreg, 0, $noreg, 32 + ; CHECK: $ymm0 = VPERM2F128rmi $ymm0, $rip, 1, $noreg, 0, $noreg, 32 $ymm0 = VSHUFF64X2Z256rmi $ymm0, $rip, 1, $noreg, 0, $noreg, 228 - ; CHECK: $ymm0 = VPERM2F128rr $ymm0, $ymm1, 32 + ; CHECK: $ymm0 = VPERM2F128rri $ymm0, $ymm1, 32 $ymm0 = VSHUFF64X2Z256rri $ymm0, $ymm1, 228 - ; CHECK: $ymm0 = VPERM2I128rm $ymm0, $rip, 1, $noreg, 0, $noreg, 32 + ; CHECK: $ymm0 = VPERM2I128rmi $ymm0, $rip, 1, $noreg, 0, $noreg, 32 $ymm0 = VSHUFI32X4Z256rmi $ymm0, $rip, 1, $noreg, 0, $noreg, 228 - ; CHECK: $ymm0 = VPERM2I128rr $ymm0, $ymm1, 32 + ; CHECK: $ymm0 = VPERM2I128rri $ymm0, $ymm1, 32 $ymm0 = VSHUFI32X4Z256rri $ymm0, $ymm1, 228 - ; CHECK: $ymm0 = VPERM2I128rm $ymm0, $rip, 1, $noreg, 0, $noreg, 32 + ; CHECK: $ymm0 = VPERM2I128rmi $ymm0, $rip, 1, $noreg, 0, $noreg, 32 $ymm0 = VSHUFI64X2Z256rmi $ymm0, $rip, 1, $noreg, 0, $noreg, 228 - ; CHECK: $ymm0 = VPERM2I128rr $ymm0, $ymm1, 32 + ; CHECK: $ymm0 = VPERM2I128rri $ymm0, $ymm1, 32 $ymm0 = VSHUFI64X2Z256rri $ymm0, $ymm1, 228 RET64 @@ -2074,38 +2074,38 @@ body: | $xmm0 = VFNMSUB231SSZr $xmm0, $xmm1, $xmm2, implicit $mxcsr ; CHECK: $xmm0 = VFNMSUB231SSr_Int $xmm0, $xmm1, $xmm2, implicit $mxcsr $xmm0 = VFNMSUB231SSZr_Int $xmm0, $xmm1, $xmm2, implicit $mxcsr - ; CHECK: VPEXTRBmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 - VPEXTRBZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 - ; CHECK: $eax = VPEXTRBrr $xmm0, 1 - $eax = VPEXTRBZrr $xmm0, 1 - ; CHECK: VPEXTRDmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 - VPEXTRDZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 - ; CHECK: $eax = VPEXTRDrr $xmm0, 1 - $eax = VPEXTRDZrr $xmm0, 1 - ; CHECK: VPEXTRQmr $rdi, 1, $noreg, 
0, $noreg, $xmm0, 3 - VPEXTRQZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 - ; CHECK: $rax = VPEXTRQrr $xmm0, 1 - $rax = VPEXTRQZrr $xmm0, 1 - ; CHECK: VPEXTRWmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 - VPEXTRWZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 - ; CHECK: $eax = VPEXTRWrr $xmm0, 1 - $eax = VPEXTRWZrr $xmm0, 1 - ; CHECK: $xmm0 = VPINSRBrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 - $xmm0 = VPINSRBZrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 - ; CHECK: $xmm0 = VPINSRBrr $xmm0, $edi, 5 - $xmm0 = VPINSRBZrr $xmm0, $edi, 5 - ; CHECK: $xmm0 = VPINSRDrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 - $xmm0 = VPINSRDZrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 - ; CHECK: $xmm0 = VPINSRDrr $xmm0, $edi, 5 - $xmm0 = VPINSRDZrr $xmm0, $edi, 5 - ; CHECK: $xmm0 = VPINSRQrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 - $xmm0 = VPINSRQZrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 - ; CHECK: $xmm0 = VPINSRQrr $xmm0, $rdi, 5 - $xmm0 = VPINSRQZrr $xmm0, $rdi, 5 - ; CHECK: $xmm0 = VPINSRWrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 - $xmm0 = VPINSRWZrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 - ; CHECK: $xmm0 = VPINSRWrr $xmm0, $edi, 5 - $xmm0 = VPINSRWZrr $xmm0, $edi, 5 + ; CHECK: VPEXTRBmri $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 + VPEXTRBZmri $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 + ; CHECK: $eax = VPEXTRBrri $xmm0, 1 + $eax = VPEXTRBZrri $xmm0, 1 + ; CHECK: VPEXTRDmri $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 + VPEXTRDZmri $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 + ; CHECK: $eax = VPEXTRDrri $xmm0, 1 + $eax = VPEXTRDZrri $xmm0, 1 + ; CHECK: VPEXTRQmri $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 + VPEXTRQZmri $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 + ; CHECK: $rax = VPEXTRQrri $xmm0, 1 + $rax = VPEXTRQZrri $xmm0, 1 + ; CHECK: VPEXTRWmri $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 + VPEXTRWZmri $rdi, 1, $noreg, 0, $noreg, $xmm0, 3 + ; CHECK: $eax = VPEXTRWrri $xmm0, 1 + $eax = VPEXTRWZrri $xmm0, 1 + ; CHECK: $xmm0 = VPINSRBrmi $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 + $xmm0 = VPINSRBZrmi $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 + ; CHECK: $xmm0 = VPINSRBrri $xmm0, $edi, 5 + $xmm0 = VPINSRBZrri $xmm0, $edi, 5 + ; CHECK: $xmm0 = VPINSRDrmi $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 + $xmm0 = VPINSRDZrmi $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 + ; CHECK: $xmm0 = VPINSRDrri $xmm0, $edi, 5 + $xmm0 = VPINSRDZrri $xmm0, $edi, 5 + ; CHECK: $xmm0 = VPINSRQrmi $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 + $xmm0 = VPINSRQZrmi $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 + ; CHECK: $xmm0 = VPINSRQrri $xmm0, $rdi, 5 + $xmm0 = VPINSRQZrri $xmm0, $rdi, 5 + ; CHECK: $xmm0 = VPINSRWrmi $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 + $xmm0 = VPINSRWZrmi $xmm0, $rsi, 1, $noreg, 0, $noreg, 3 + ; CHECK: $xmm0 = VPINSRWrri $xmm0, $edi, 5 + $xmm0 = VPINSRWZrri $xmm0, $edi, 5 ; CHECK: $xmm0 = VSQRTSDm $xmm0, $rdi, 1, $noreg, 0, $noreg, implicit $mxcsr $xmm0 = VSQRTSDZm $xmm0, $rdi, 1, $noreg, 0, $noreg, implicit $mxcsr ; CHECK: $xmm0 = VSQRTSDm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg, implicit $mxcsr @@ -2300,14 +2300,14 @@ body: | VUCOMISSZrm $xmm0, $rdi, 1, $noreg, 0, $noreg, implicit-def $eflags, implicit $mxcsr ; CHECK: VUCOMISSrr $xmm0, $xmm1, implicit-def $eflags, implicit $mxcsr VUCOMISSZrr $xmm0, $xmm1, implicit-def $eflags, implicit $mxcsr - ; CHECK: VEXTRACTPSmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 1 - VEXTRACTPSZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 1 - ; CHECK: $eax = VEXTRACTPSrr $xmm0, 1 - $eax = VEXTRACTPSZrr $xmm0, 1 - ; CHECK: $xmm0 = VINSERTPSrm $xmm0, $rdi, 1, $noreg, 0, $noreg, 1 - $xmm0 = VINSERTPSZrm $xmm0, $rdi, 1, $noreg, 0, $noreg, 1 - ; CHECK: $xmm0 = VINSERTPSrr $xmm0, $xmm0, 1 - $xmm0 = VINSERTPSZrr $xmm0, $xmm0, 1 + ; 
CHECK: VEXTRACTPSmri $rdi, 1, $noreg, 0, $noreg, $xmm0, 1 + VEXTRACTPSZmri $rdi, 1, $noreg, 0, $noreg, $xmm0, 1 + ; CHECK: $eax = VEXTRACTPSrri $xmm0, 1 + $eax = VEXTRACTPSZrri $xmm0, 1 + ; CHECK: $xmm0 = VINSERTPSrmi $xmm0, $rdi, 1, $noreg, 0, $noreg, 1 + $xmm0 = VINSERTPSZrmi $xmm0, $rdi, 1, $noreg, 0, $noreg, 1 + ; CHECK: $xmm0 = VINSERTPSrri $xmm0, $xmm0, 1 + $xmm0 = VINSERTPSZrri $xmm0, $xmm0, 1 ; CHECK: $xmm0 = VROUNDSDmi $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr $xmm0 = VRNDSCALESDZm $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr ; CHECK: $xmm0 = VROUNDSDri $xmm0, $xmm1, 15, implicit $mxcsr @@ -4068,14 +4068,14 @@ body: | $xmm16 = VPALIGNRZ128rmi $xmm16, $rdi, 1, $noreg, 0, $noreg, 15 ; CHECK: $xmm16 = VPALIGNRZ128rri $xmm16, $xmm1, 15 $xmm16 = VPALIGNRZ128rri $xmm16, $xmm1, 15 - ; CHECK: VEXTRACTPSZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 1 - VEXTRACTPSZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 1 - ; CHECK: $eax = VEXTRACTPSZrr $xmm16, 1 - $eax = VEXTRACTPSZrr $xmm16, 1 - ; CHECK: $xmm16 = VINSERTPSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg, 1 - $xmm16 = VINSERTPSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg, 1 - ; CHECK: $xmm16 = VINSERTPSZrr $xmm16, $xmm16, 1 - $xmm16 = VINSERTPSZrr $xmm16, $xmm16, 1 + ; CHECK: VEXTRACTPSZmri $rdi, 1, $noreg, 0, $noreg, $xmm16, 1 + VEXTRACTPSZmri $rdi, 1, $noreg, 0, $noreg, $xmm16, 1 + ; CHECK: $eax = VEXTRACTPSZrri $xmm16, 1 + $eax = VEXTRACTPSZrri $xmm16, 1 + ; CHECK: $xmm16 = VINSERTPSZrmi $xmm16, $rdi, 1, $noreg, 0, $noreg, 1 + $xmm16 = VINSERTPSZrmi $xmm16, $rdi, 1, $noreg, 0, $noreg, 1 + ; CHECK: $xmm16 = VINSERTPSZrri $xmm16, $xmm16, 1 + $xmm16 = VINSERTPSZrri $xmm16, $xmm16, 1 ; CHECK: $xmm16 = VRNDSCALEPDZ128rmi $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr $xmm16 = VRNDSCALEPDZ128rmi $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr ; CHECK: $xmm16 = VRNDSCALEPDZ128rri $xmm16, 15, implicit $mxcsr @@ -4406,38 +4406,38 @@ body: | $xmm16 = VFNMSUB231SSZr $xmm16, $xmm1, $xmm2, implicit $mxcsr ; CHECK: $xmm16 = VFNMSUB231SSZr_Int $xmm16, $xmm1, $xmm2, implicit $mxcsr $xmm16 = VFNMSUB231SSZr_Int $xmm16, $xmm1, $xmm2, implicit $mxcsr - ; CHECK: VPEXTRBZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 - VPEXTRBZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 - ; CHECK: $eax = VPEXTRBZrr $xmm16, 1 - $eax = VPEXTRBZrr $xmm16, 1 - ; CHECK: VPEXTRDZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 - VPEXTRDZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 - ; CHECK: $eax = VPEXTRDZrr $xmm16, 1 - $eax = VPEXTRDZrr $xmm16, 1 - ; CHECK: VPEXTRQZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 - VPEXTRQZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 - ; CHECK: $rax = VPEXTRQZrr $xmm16, 1 - $rax = VPEXTRQZrr $xmm16, 1 - ; CHECK: VPEXTRWZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 - VPEXTRWZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 - ; CHECK: $eax = VPEXTRWZrr $xmm16, 1 - $eax = VPEXTRWZrr $xmm16, 1 - ; CHECK: $xmm16 = VPINSRBZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 - $xmm16 = VPINSRBZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 - ; CHECK: $xmm16 = VPINSRBZrr $xmm16, $edi, 5 - $xmm16 = VPINSRBZrr $xmm16, $edi, 5 - ; CHECK: $xmm16 = VPINSRDZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 - $xmm16 = VPINSRDZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 - ; CHECK: $xmm16 = VPINSRDZrr $xmm16, $edi, 5 - $xmm16 = VPINSRDZrr $xmm16, $edi, 5 - ; CHECK: $xmm16 = VPINSRQZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 - $xmm16 = VPINSRQZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 - ; CHECK: $xmm16 = VPINSRQZrr $xmm16, $rdi, 5 - $xmm16 = VPINSRQZrr $xmm16, $rdi, 5 - ; CHECK: $xmm16 = VPINSRWZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 - 
$xmm16 = VPINSRWZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 - ; CHECK: $xmm16 = VPINSRWZrr $xmm16, $edi, 5 - $xmm16 = VPINSRWZrr $xmm16, $edi, 5 + ; CHECK: VPEXTRBZmri $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 + VPEXTRBZmri $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 + ; CHECK: $eax = VPEXTRBZrri $xmm16, 1 + $eax = VPEXTRBZrri $xmm16, 1 + ; CHECK: VPEXTRDZmri $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 + VPEXTRDZmri $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 + ; CHECK: $eax = VPEXTRDZrri $xmm16, 1 + $eax = VPEXTRDZrri $xmm16, 1 + ; CHECK: VPEXTRQZmri $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 + VPEXTRQZmri $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 + ; CHECK: $rax = VPEXTRQZrri $xmm16, 1 + $rax = VPEXTRQZrri $xmm16, 1 + ; CHECK: VPEXTRWZmri $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 + VPEXTRWZmri $rdi, 1, $noreg, 0, $noreg, $xmm16, 3 + ; CHECK: $eax = VPEXTRWZrri $xmm16, 1 + $eax = VPEXTRWZrri $xmm16, 1 + ; CHECK: $xmm16 = VPINSRBZrmi $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + $xmm16 = VPINSRBZrmi $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + ; CHECK: $xmm16 = VPINSRBZrri $xmm16, $edi, 5 + $xmm16 = VPINSRBZrri $xmm16, $edi, 5 + ; CHECK: $xmm16 = VPINSRDZrmi $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + $xmm16 = VPINSRDZrmi $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + ; CHECK: $xmm16 = VPINSRDZrri $xmm16, $edi, 5 + $xmm16 = VPINSRDZrri $xmm16, $edi, 5 + ; CHECK: $xmm16 = VPINSRQZrmi $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + $xmm16 = VPINSRQZrmi $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + ; CHECK: $xmm16 = VPINSRQZrri $xmm16, $rdi, 5 + $xmm16 = VPINSRQZrri $xmm16, $rdi, 5 + ; CHECK: $xmm16 = VPINSRWZrmi $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + $xmm16 = VPINSRWZrmi $xmm16, $rsi, 1, $noreg, 0, $noreg, 3 + ; CHECK: $xmm16 = VPINSRWZrri $xmm16, $edi, 5 + $xmm16 = VPINSRWZrri $xmm16, $edi, 5 ; CHECK: $xmm16 = VSQRTSDZm $xmm16, $rdi, 1, $noreg, 0, $noreg, implicit $mxcsr $xmm16 = VSQRTSDZm $xmm16, $rdi, 1, $noreg, 0, $noreg, implicit $mxcsr ; CHECK: $xmm16 = VSQRTSDZm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg, implicit $mxcsr diff --git a/llvm/test/CodeGen/X86/opt_phis2.mir b/llvm/test/CodeGen/X86/opt_phis2.mir index 23c75b3..f688e83 100644 --- a/llvm/test/CodeGen/X86/opt_phis2.mir +++ b/llvm/test/CodeGen/X86/opt_phis2.mir @@ -49,23 +49,23 @@ body: | bb.4: %3:vr256 = COPY %8 - %17:vr128 = VEXTRACTF128rr %8, 1 - VPEXTRDmr %9, 1, $noreg, 12, $noreg, killed %17, 2 + %17:vr128 = VEXTRACTF128rri %8, 1 + VPEXTRDmri %9, 1, $noreg, 12, $noreg, killed %17, 2 bb.5: %4:vr256 = PHI %0, %bb.1, %3, %bb.4 - %18:vr128 = VEXTRACTF128rr %4, 1 - VPEXTRDmr %9, 1, $noreg, 8, $noreg, killed %18, 1 + %18:vr128 = VEXTRACTF128rri %4, 1 + VPEXTRDmri %9, 1, $noreg, 8, $noreg, killed %18, 1 bb.6: %5:vr256 = PHI %1, %bb.2, %4, %bb.5 - %19:vr128 = VEXTRACTF128rr %5, 1 + %19:vr128 = VEXTRACTF128rri %5, 1 VMOVPDI2DImr %9, 1, $noreg, 4, $noreg, killed %19 bb.7: %6:vr256 = PHI %2, %bb.3, %5, %bb.6 %20:vr128 = COPY %6.sub_xmm - VPEXTRDmr %9, 1, $noreg, 0, $noreg, killed %20, 3 + VPEXTRDmri %9, 1, $noreg, 0, $noreg, killed %20, 3 bb.8: RET 0 diff --git a/llvm/test/CodeGen/X86/pr108728.ll b/llvm/test/CodeGen/X86/pr108728.ll new file mode 100644 index 0000000..75a6618 --- /dev/null +++ b/llvm/test/CodeGen/X86/pr108728.ll @@ -0,0 +1,14 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s + +define i8 @PR108728(i1 %a0) { +; CHECK-LABEL: PR108728: +; CHECK: # %bb.0: +; CHECK-NEXT: xorl %eax, %eax +; CHECK-NEXT: retq + %sel = select i1 %a0, i8 0, i8 1 + %not = xor i8 %sel, -1 + %udiv = udiv i8 1, %not + 
%cnt = tail call i8 @llvm.ctpop.i8(i8 %udiv) + ret i8 %cnt +} diff --git a/llvm/test/CodeGen/X86/vmaskmov-offset.ll b/llvm/test/CodeGen/X86/vmaskmov-offset.ll index d219ee9..73813c0 100644 --- a/llvm/test/CodeGen/X86/vmaskmov-offset.ll +++ b/llvm/test/CodeGen/X86/vmaskmov-offset.ll @@ -76,7 +76,7 @@ define void @one_mask_bit_set2(ptr %addr, <4 x float> %val) { ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi - ; CHECK-NEXT: VEXTRACTPSmr [[COPY1]], 1, $noreg, 8, $noreg, [[COPY]], 2 :: (store (s32) into %ir.addr + 8) + ; CHECK-NEXT: VEXTRACTPSmri [[COPY1]], 1, $noreg, 8, $noreg, [[COPY]], 2 :: (store (s32) into %ir.addr + 8) ; CHECK-NEXT: RET 0 call void @llvm.masked.store.v4f32.p0(<4 x float> %val, ptr %addr, i32 4, <4 x i1><i1 false, i1 false, i1 true, i1 false>) ret void diff --git a/llvm/test/MC/WebAssembly/eh-assembly.s b/llvm/test/MC/WebAssembly/eh-assembly.s index 8c4ae3b..cd33d19 100644 --- a/llvm/test/MC/WebAssembly/eh-assembly.s +++ b/llvm/test/MC/WebAssembly/eh-assembly.s @@ -37,6 +37,21 @@ eh_legacy_test: catch __cpp_exception end_try drop + + # try-catch with a multivalue return + try () -> (i32, f32) + i32.const 0 + f32.const 0.0 + catch __cpp_exception + f32.const 1.0 + end_try + drop + drop + + # Catch-less try + try + call foo + end_try end_function # CHECK-LABEL: eh_legacy_test: # CHECK-NEXT: catch __cpp_exception # CHECK-NEXT: end_try # CHECK-NEXT: drop + +# CHECK: try () -> (i32, f32) +# CHECK-NEXT: i32.const 0 +# CHECK-NEXT: f32.const 0x0p0 +# CHECK-NEXT: catch __cpp_exception +# CHECK-NEXT: f32.const 0x1p0 +# CHECK-NEXT: end_try +# CHECK-NEXT: drop +# CHECK-NEXT: drop + +# CHECK: try +# CHECK-NEXT: call foo +# CHECK-NEXT: end_try # CHECK-NEXT: end_function + diff --git a/llvm/test/TableGen/GlobalISelEmitter.td b/llvm/test/TableGen/GlobalISelEmitter.td index b9aea33..7dbaf43 100644 --- a/llvm/test/TableGen/GlobalISelEmitter.td +++ b/llvm/test/TableGen/GlobalISelEmitter.td @@ -513,7 +513,7 @@ def : Pat<(frag GPR32:$src1, complex:$src2, complex:$src3), // R00O-NEXT: GIM_Reject, // R00O: // Label [[DEFAULT_NUM]]: @[[DEFAULT]] // R00O-NEXT: GIM_Reject, -// R00O-NEXT: }; // Size: 1824 bytes +// R00O-NEXT: }; // Size: 1832 bytes def INSNBOB : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2, GPR32:$src3, GPR32:$src4), [(set GPR32:$dst, diff --git a/llvm/test/TableGen/x86-fold-tables.inc b/llvm/test/TableGen/x86-fold-tables.inc index be1b59e..e85708a 100644 --- a/llvm/test/TableGen/x86-fold-tables.inc +++ b/llvm/test/TableGen/x86-fold-tables.inc @@ -402,7 +402,7 @@ static const X86FoldTableEntry Table0[] = { {X86::DIV64r_NF, X86::DIV64m_NF, TB_FOLDED_LOAD}, {X86::DIV8r, X86::DIV8m, TB_FOLDED_LOAD}, {X86::DIV8r_NF, X86::DIV8m_NF, TB_FOLDED_LOAD}, - {X86::EXTRACTPSrr, X86::EXTRACTPSmr, TB_FOLDED_STORE}, + {X86::EXTRACTPSrri, X86::EXTRACTPSmri, TB_FOLDED_STORE}, {X86::IDIV16r, X86::IDIV16m, TB_FOLDED_LOAD}, {X86::IDIV16r_NF, X86::IDIV16m_NF, TB_FOLDED_LOAD}, {X86::IDIV32r, X86::IDIV32m, TB_FOLDED_LOAD}, @@ -461,8 +461,8 @@ static const X86FoldTableEntry Table0[] = { {X86::MUL64r_NF, X86::MUL64m_NF, TB_FOLDED_LOAD}, {X86::MUL8r, X86::MUL8m, TB_FOLDED_LOAD}, {X86::MUL8r_NF, X86::MUL8m_NF, TB_FOLDED_LOAD}, - {X86::PEXTRDrr, X86::PEXTRDmr, TB_FOLDED_STORE}, - {X86::PEXTRQrr, X86::PEXTRQmr, TB_FOLDED_STORE}, + {X86::PEXTRDrri, X86::PEXTRDmri, TB_FOLDED_STORE}, + {X86::PEXTRQrri, X86::PEXTRQmri, TB_FOLDED_STORE}, {X86::PTWRITE64r, X86::PTWRITE64m, TB_FOLDED_LOAD}, {X86::PTWRITEr,
X86::PTWRITEm, TB_FOLDED_LOAD}, {X86::PUSH16r, X86::PUSH16rmm, TB_FOLDED_LOAD}, @@ -486,22 +486,22 @@ static const X86FoldTableEntry Table0[] = { {X86::VCVTPS2PHYrr, X86::VCVTPS2PHYmr, TB_FOLDED_STORE}, {X86::VCVTPS2PHZ256rr, X86::VCVTPS2PHZ256mr, TB_FOLDED_STORE}, {X86::VCVTPS2PHZrr, X86::VCVTPS2PHZmr, TB_FOLDED_STORE}, - {X86::VEXTRACTF128rr, X86::VEXTRACTF128mr, TB_FOLDED_STORE}, - {X86::VEXTRACTF32x4Z256rr, X86::VEXTRACTF32x4Z256mr, TB_FOLDED_STORE}, - {X86::VEXTRACTF32x4Zrr, X86::VEXTRACTF32x4Zmr, TB_FOLDED_STORE}, - {X86::VEXTRACTF32x8Zrr, X86::VEXTRACTF32x8Zmr, TB_FOLDED_STORE}, - {X86::VEXTRACTF64x2Z256rr, X86::VEXTRACTF64x2Z256mr, TB_FOLDED_STORE}, - {X86::VEXTRACTF64x2Zrr, X86::VEXTRACTF64x2Zmr, TB_FOLDED_STORE}, - {X86::VEXTRACTF64x4Zrr, X86::VEXTRACTF64x4Zmr, TB_FOLDED_STORE}, - {X86::VEXTRACTI128rr, X86::VEXTRACTI128mr, TB_FOLDED_STORE}, - {X86::VEXTRACTI32x4Z256rr, X86::VEXTRACTI32x4Z256mr, TB_FOLDED_STORE}, - {X86::VEXTRACTI32x4Zrr, X86::VEXTRACTI32x4Zmr, TB_FOLDED_STORE}, - {X86::VEXTRACTI32x8Zrr, X86::VEXTRACTI32x8Zmr, TB_FOLDED_STORE}, - {X86::VEXTRACTI64x2Z256rr, X86::VEXTRACTI64x2Z256mr, TB_FOLDED_STORE}, - {X86::VEXTRACTI64x2Zrr, X86::VEXTRACTI64x2Zmr, TB_FOLDED_STORE}, - {X86::VEXTRACTI64x4Zrr, X86::VEXTRACTI64x4Zmr, TB_FOLDED_STORE}, - {X86::VEXTRACTPSZrr, X86::VEXTRACTPSZmr, TB_FOLDED_STORE}, - {X86::VEXTRACTPSrr, X86::VEXTRACTPSmr, TB_FOLDED_STORE}, + {X86::VEXTRACTF128rri, X86::VEXTRACTF128mri, TB_FOLDED_STORE}, + {X86::VEXTRACTF32x4Z256rri, X86::VEXTRACTF32x4Z256mri, TB_FOLDED_STORE}, + {X86::VEXTRACTF32x4Zrri, X86::VEXTRACTF32x4Zmri, TB_FOLDED_STORE}, + {X86::VEXTRACTF32x8Zrri, X86::VEXTRACTF32x8Zmri, TB_FOLDED_STORE}, + {X86::VEXTRACTF64x2Z256rri, X86::VEXTRACTF64x2Z256mri, TB_FOLDED_STORE}, + {X86::VEXTRACTF64x2Zrri, X86::VEXTRACTF64x2Zmri, TB_FOLDED_STORE}, + {X86::VEXTRACTF64x4Zrri, X86::VEXTRACTF64x4Zmri, TB_FOLDED_STORE}, + {X86::VEXTRACTI128rri, X86::VEXTRACTI128mri, TB_FOLDED_STORE}, + {X86::VEXTRACTI32x4Z256rri, X86::VEXTRACTI32x4Z256mri, TB_FOLDED_STORE}, + {X86::VEXTRACTI32x4Zrri, X86::VEXTRACTI32x4Zmri, TB_FOLDED_STORE}, + {X86::VEXTRACTI32x8Zrri, X86::VEXTRACTI32x8Zmri, TB_FOLDED_STORE}, + {X86::VEXTRACTI64x2Z256rri, X86::VEXTRACTI64x2Z256mri, TB_FOLDED_STORE}, + {X86::VEXTRACTI64x2Zrri, X86::VEXTRACTI64x2Zmri, TB_FOLDED_STORE}, + {X86::VEXTRACTI64x4Zrri, X86::VEXTRACTI64x4Zmri, TB_FOLDED_STORE}, + {X86::VEXTRACTPSZrri, X86::VEXTRACTPSZmri, TB_FOLDED_STORE}, + {X86::VEXTRACTPSrri, X86::VEXTRACTPSmri, TB_FOLDED_STORE}, {X86::VMOV64toSDZrr, X86::MOV64mr, TB_FOLDED_STORE|TB_NO_REVERSE}, {X86::VMOV64toSDrr, X86::MOV64mr, TB_FOLDED_STORE|TB_NO_REVERSE}, {X86::VMOVAPDYrr, X86::VMOVAPDYmr, TB_FOLDED_STORE|TB_NO_REVERSE|TB_ALIGN_32}, @@ -556,10 +556,10 @@ static const X86FoldTableEntry Table0[] = { {X86::VMOVUPSZ256rr, X86::VMOVUPSZ256mr, TB_FOLDED_STORE|TB_NO_REVERSE}, {X86::VMOVUPSZrr, X86::VMOVUPSZmr, TB_FOLDED_STORE|TB_NO_REVERSE}, {X86::VMOVUPSrr, X86::VMOVUPSmr, TB_FOLDED_STORE|TB_NO_REVERSE}, - {X86::VPEXTRDZrr, X86::VPEXTRDZmr, TB_FOLDED_STORE}, - {X86::VPEXTRDrr, X86::VPEXTRDmr, TB_FOLDED_STORE}, - {X86::VPEXTRQZrr, X86::VPEXTRQZmr, TB_FOLDED_STORE}, - {X86::VPEXTRQrr, X86::VPEXTRQmr, TB_FOLDED_STORE}, + {X86::VPEXTRDZrri, X86::VPEXTRDZmri, TB_FOLDED_STORE}, + {X86::VPEXTRDrri, X86::VPEXTRDmri, TB_FOLDED_STORE}, + {X86::VPEXTRQZrri, X86::VPEXTRQZmri, TB_FOLDED_STORE}, + {X86::VPEXTRQrri, X86::VPEXTRQmri, TB_FOLDED_STORE}, {X86::VPMOVDBZrr, X86::VPMOVDBZmr, TB_FOLDED_STORE}, {X86::VPMOVDWZ256rr, X86::VPMOVDWZ256mr, TB_FOLDED_STORE}, 
{X86::VPMOVDWZrr, X86::VPMOVDWZmr, TB_FOLDED_STORE}, @@ -2165,7 +2165,7 @@ static const X86FoldTableEntry Table2[] = { {X86::MMX_PHSUBDrr, X86::MMX_PHSUBDrm, 0}, {X86::MMX_PHSUBSWrr, X86::MMX_PHSUBSWrm, 0}, {X86::MMX_PHSUBWrr, X86::MMX_PHSUBWrm, 0}, - {X86::MMX_PINSRWrr, X86::MMX_PINSRWrm, TB_NO_REVERSE}, + {X86::MMX_PINSRWrri, X86::MMX_PINSRWrmi, TB_NO_REVERSE}, {X86::MMX_PMADDUBSWrr, X86::MMX_PMADDUBSWrm, 0}, {X86::MMX_PMADDWDrr, X86::MMX_PMADDWDrm, 0}, {X86::MMX_PMAXSWrr, X86::MMX_PMAXSWrm, 0}, @@ -2295,10 +2295,10 @@ static const X86FoldTableEntry Table2[] = { {X86::PHSUBDrr, X86::PHSUBDrm, TB_ALIGN_16}, {X86::PHSUBSWrr, X86::PHSUBSWrm, TB_ALIGN_16}, {X86::PHSUBWrr, X86::PHSUBWrm, TB_ALIGN_16}, - {X86::PINSRBrr, X86::PINSRBrm, TB_NO_REVERSE}, - {X86::PINSRDrr, X86::PINSRDrm, 0}, - {X86::PINSRQrr, X86::PINSRQrm, 0}, - {X86::PINSRWrr, X86::PINSRWrm, TB_NO_REVERSE}, + {X86::PINSRBrri, X86::PINSRBrmi, TB_NO_REVERSE}, + {X86::PINSRDrri, X86::PINSRDrmi, 0}, + {X86::PINSRQrri, X86::PINSRQrmi, 0}, + {X86::PINSRWrri, X86::PINSRWrmi, TB_NO_REVERSE}, {X86::PMADDUBSWrr, X86::PMADDUBSWrm, TB_ALIGN_16}, {X86::PMADDWDrr, X86::PMADDWDrm, TB_ALIGN_16}, {X86::PMAXSBrr, X86::PMAXSBrm, TB_ALIGN_16}, @@ -2986,20 +2986,20 @@ static const X86FoldTableEntry Table2[] = { {X86::VHSUBPDrr, X86::VHSUBPDrm, 0}, {X86::VHSUBPSYrr, X86::VHSUBPSYrm, 0}, {X86::VHSUBPSrr, X86::VHSUBPSrm, 0}, - {X86::VINSERTF128rr, X86::VINSERTF128rm, 0}, - {X86::VINSERTF32x4Z256rr, X86::VINSERTF32x4Z256rm, 0}, - {X86::VINSERTF32x4Zrr, X86::VINSERTF32x4Zrm, 0}, - {X86::VINSERTF32x8Zrr, X86::VINSERTF32x8Zrm, 0}, - {X86::VINSERTF64x2Z256rr, X86::VINSERTF64x2Z256rm, 0}, - {X86::VINSERTF64x2Zrr, X86::VINSERTF64x2Zrm, 0}, - {X86::VINSERTF64x4Zrr, X86::VINSERTF64x4Zrm, 0}, - {X86::VINSERTI128rr, X86::VINSERTI128rm, 0}, - {X86::VINSERTI32x4Z256rr, X86::VINSERTI32x4Z256rm, 0}, - {X86::VINSERTI32x4Zrr, X86::VINSERTI32x4Zrm, 0}, - {X86::VINSERTI32x8Zrr, X86::VINSERTI32x8Zrm, 0}, - {X86::VINSERTI64x2Z256rr, X86::VINSERTI64x2Z256rm, 0}, - {X86::VINSERTI64x2Zrr, X86::VINSERTI64x2Zrm, 0}, - {X86::VINSERTI64x4Zrr, X86::VINSERTI64x4Zrm, 0}, + {X86::VINSERTF128rri, X86::VINSERTF128rmi, 0}, + {X86::VINSERTF32x4Z256rri, X86::VINSERTF32x4Z256rmi, 0}, + {X86::VINSERTF32x4Zrri, X86::VINSERTF32x4Zrmi, 0}, + {X86::VINSERTF32x8Zrri, X86::VINSERTF32x8Zrmi, 0}, + {X86::VINSERTF64x2Z256rri, X86::VINSERTF64x2Z256rmi, 0}, + {X86::VINSERTF64x2Zrri, X86::VINSERTF64x2Zrmi, 0}, + {X86::VINSERTF64x4Zrri, X86::VINSERTF64x4Zrmi, 0}, + {X86::VINSERTI128rri, X86::VINSERTI128rmi, 0}, + {X86::VINSERTI32x4Z256rri, X86::VINSERTI32x4Z256rmi, 0}, + {X86::VINSERTI32x4Zrri, X86::VINSERTI32x4Zrmi, 0}, + {X86::VINSERTI32x8Zrri, X86::VINSERTI32x8Zrmi, 0}, + {X86::VINSERTI64x2Z256rri, X86::VINSERTI64x2Z256rmi, 0}, + {X86::VINSERTI64x2Zrri, X86::VINSERTI64x2Zrmi, 0}, + {X86::VINSERTI64x4Zrri, X86::VINSERTI64x4Zrmi, 0}, {X86::VMAXCPDYrr, X86::VMAXCPDYrm, 0}, {X86::VMAXCPDZ128rr, X86::VMAXCPDZ128rm, 0}, {X86::VMAXCPDZ256rr, X86::VMAXCPDZ256rm, 0}, @@ -3411,8 +3411,8 @@ static const X86FoldTableEntry Table2[] = { {X86::VPCONFLICTQZ128rrkz, X86::VPCONFLICTQZ128rmkz, 0}, {X86::VPCONFLICTQZ256rrkz, X86::VPCONFLICTQZ256rmkz, 0}, {X86::VPCONFLICTQZrrkz, X86::VPCONFLICTQZrmkz, 0}, - {X86::VPERM2F128rr, X86::VPERM2F128rm, 0}, - {X86::VPERM2I128rr, X86::VPERM2I128rm, 0}, + {X86::VPERM2F128rri, X86::VPERM2F128rmi, 0}, + {X86::VPERM2I128rri, X86::VPERM2I128rmi, 0}, {X86::VPERMBZ128rr, X86::VPERMBZ128rm, 0}, {X86::VPERMBZ256rr, X86::VPERMBZ256rm, 0}, {X86::VPERMBZrr, X86::VPERMBZrm, 0}, @@ -3477,14 
+3477,14 @@ static const X86FoldTableEntry Table2[] = { {X86::VPHSUBSWrr, X86::VPHSUBSWrm, 0}, {X86::VPHSUBWYrr, X86::VPHSUBWYrm, 0}, {X86::VPHSUBWrr, X86::VPHSUBWrm, 0}, - {X86::VPINSRBZrr, X86::VPINSRBZrm, TB_NO_REVERSE}, - {X86::VPINSRBrr, X86::VPINSRBrm, TB_NO_REVERSE}, - {X86::VPINSRDZrr, X86::VPINSRDZrm, 0}, - {X86::VPINSRDrr, X86::VPINSRDrm, 0}, - {X86::VPINSRQZrr, X86::VPINSRQZrm, 0}, - {X86::VPINSRQrr, X86::VPINSRQrm, 0}, - {X86::VPINSRWZrr, X86::VPINSRWZrm, TB_NO_REVERSE}, - {X86::VPINSRWrr, X86::VPINSRWrm, TB_NO_REVERSE}, + {X86::VPINSRBZrri, X86::VPINSRBZrmi, TB_NO_REVERSE}, + {X86::VPINSRBrri, X86::VPINSRBrmi, TB_NO_REVERSE}, + {X86::VPINSRDZrri, X86::VPINSRDZrmi, 0}, + {X86::VPINSRDrri, X86::VPINSRDrmi, 0}, + {X86::VPINSRQZrri, X86::VPINSRQZrmi, 0}, + {X86::VPINSRQrri, X86::VPINSRQrmi, 0}, + {X86::VPINSRWZrri, X86::VPINSRWZrmi, TB_NO_REVERSE}, + {X86::VPINSRWrri, X86::VPINSRWrmi, TB_NO_REVERSE}, {X86::VPLZCNTDZ128rrkz, X86::VPLZCNTDZ128rmkz, 0}, {X86::VPLZCNTDZ256rrkz, X86::VPLZCNTDZ256rmkz, 0}, {X86::VPLZCNTDZrrkz, X86::VPLZCNTDZrmkz, 0}, @@ -5057,18 +5057,18 @@ static const X86FoldTableEntry Table3[] = { {X86::VGF2P8MULBZ128rrkz, X86::VGF2P8MULBZ128rmkz, 0}, {X86::VGF2P8MULBZ256rrkz, X86::VGF2P8MULBZ256rmkz, 0}, {X86::VGF2P8MULBZrrkz, X86::VGF2P8MULBZrmkz, 0}, - {X86::VINSERTF32x4Z256rrkz, X86::VINSERTF32x4Z256rmkz, 0}, - {X86::VINSERTF32x4Zrrkz, X86::VINSERTF32x4Zrmkz, 0}, - {X86::VINSERTF32x8Zrrkz, X86::VINSERTF32x8Zrmkz, 0}, - {X86::VINSERTF64x2Z256rrkz, X86::VINSERTF64x2Z256rmkz, 0}, - {X86::VINSERTF64x2Zrrkz, X86::VINSERTF64x2Zrmkz, 0}, - {X86::VINSERTF64x4Zrrkz, X86::VINSERTF64x4Zrmkz, 0}, - {X86::VINSERTI32x4Z256rrkz, X86::VINSERTI32x4Z256rmkz, 0}, - {X86::VINSERTI32x4Zrrkz, X86::VINSERTI32x4Zrmkz, 0}, - {X86::VINSERTI32x8Zrrkz, X86::VINSERTI32x8Zrmkz, 0}, - {X86::VINSERTI64x2Z256rrkz, X86::VINSERTI64x2Z256rmkz, 0}, - {X86::VINSERTI64x2Zrrkz, X86::VINSERTI64x2Zrmkz, 0}, - {X86::VINSERTI64x4Zrrkz, X86::VINSERTI64x4Zrmkz, 0}, + {X86::VINSERTF32x4Z256rrikz, X86::VINSERTF32x4Z256rmikz, 0}, + {X86::VINSERTF32x4Zrrikz, X86::VINSERTF32x4Zrmikz, 0}, + {X86::VINSERTF32x8Zrrikz, X86::VINSERTF32x8Zrmikz, 0}, + {X86::VINSERTF64x2Z256rrikz, X86::VINSERTF64x2Z256rmikz, 0}, + {X86::VINSERTF64x2Zrrikz, X86::VINSERTF64x2Zrmikz, 0}, + {X86::VINSERTF64x4Zrrikz, X86::VINSERTF64x4Zrmikz, 0}, + {X86::VINSERTI32x4Z256rrikz, X86::VINSERTI32x4Z256rmikz, 0}, + {X86::VINSERTI32x4Zrrikz, X86::VINSERTI32x4Zrmikz, 0}, + {X86::VINSERTI32x8Zrrikz, X86::VINSERTI32x8Zrmikz, 0}, + {X86::VINSERTI64x2Z256rrikz, X86::VINSERTI64x2Z256rmikz, 0}, + {X86::VINSERTI64x2Zrrikz, X86::VINSERTI64x2Zrmikz, 0}, + {X86::VINSERTI64x4Zrrikz, X86::VINSERTI64x4Zrmikz, 0}, {X86::VMAXCPDZ128rrkz, X86::VMAXCPDZ128rmkz, 0}, {X86::VMAXCPDZ256rrkz, X86::VMAXCPDZ256rmkz, 0}, {X86::VMAXCPDZrrkz, X86::VMAXCPDZrmkz, 0}, @@ -6679,18 +6679,18 @@ static const X86FoldTableEntry Table4[] = { {X86::VGF2P8MULBZ128rrk, X86::VGF2P8MULBZ128rmk, 0}, {X86::VGF2P8MULBZ256rrk, X86::VGF2P8MULBZ256rmk, 0}, {X86::VGF2P8MULBZrrk, X86::VGF2P8MULBZrmk, 0}, - {X86::VINSERTF32x4Z256rrk, X86::VINSERTF32x4Z256rmk, 0}, - {X86::VINSERTF32x4Zrrk, X86::VINSERTF32x4Zrmk, 0}, - {X86::VINSERTF32x8Zrrk, X86::VINSERTF32x8Zrmk, 0}, - {X86::VINSERTF64x2Z256rrk, X86::VINSERTF64x2Z256rmk, 0}, - {X86::VINSERTF64x2Zrrk, X86::VINSERTF64x2Zrmk, 0}, - {X86::VINSERTF64x4Zrrk, X86::VINSERTF64x4Zrmk, 0}, - {X86::VINSERTI32x4Z256rrk, X86::VINSERTI32x4Z256rmk, 0}, - {X86::VINSERTI32x4Zrrk, X86::VINSERTI32x4Zrmk, 0}, - {X86::VINSERTI32x8Zrrk, X86::VINSERTI32x8Zrmk, 0}, - 
{X86::VINSERTI64x2Z256rrk, X86::VINSERTI64x2Z256rmk, 0}, - {X86::VINSERTI64x2Zrrk, X86::VINSERTI64x2Zrmk, 0}, - {X86::VINSERTI64x4Zrrk, X86::VINSERTI64x4Zrmk, 0}, + {X86::VINSERTF32x4Z256rrik, X86::VINSERTF32x4Z256rmik, 0}, + {X86::VINSERTF32x4Zrrik, X86::VINSERTF32x4Zrmik, 0}, + {X86::VINSERTF32x8Zrrik, X86::VINSERTF32x8Zrmik, 0}, + {X86::VINSERTF64x2Z256rrik, X86::VINSERTF64x2Z256rmik, 0}, + {X86::VINSERTF64x2Zrrik, X86::VINSERTF64x2Zrmik, 0}, + {X86::VINSERTF64x4Zrrik, X86::VINSERTF64x4Zrmik, 0}, + {X86::VINSERTI32x4Z256rrik, X86::VINSERTI32x4Z256rmik, 0}, + {X86::VINSERTI32x4Zrrik, X86::VINSERTI32x4Zrmik, 0}, + {X86::VINSERTI32x8Zrrik, X86::VINSERTI32x8Zrmik, 0}, + {X86::VINSERTI64x2Z256rrik, X86::VINSERTI64x2Z256rmik, 0}, + {X86::VINSERTI64x2Zrrik, X86::VINSERTI64x2Zrmik, 0}, + {X86::VINSERTI64x4Zrrik, X86::VINSERTI64x4Zrmik, 0}, {X86::VMAXCPDZ128rrk, X86::VMAXCPDZ128rmk, 0}, {X86::VMAXCPDZ256rrk, X86::VMAXCPDZ256rmk, 0}, {X86::VMAXCPDZrrk, X86::VMAXCPDZrmk, 0}, diff --git a/llvm/test/Transforms/FunctionSpecialization/discover-transitive-phis.ll b/llvm/test/Transforms/FunctionSpecialization/discover-transitive-phis.ll index d009523..8a172db 100644 --- a/llvm/test/Transforms/FunctionSpecialization/discover-transitive-phis.ll +++ b/llvm/test/Transforms/FunctionSpecialization/discover-transitive-phis.ll @@ -29,7 +29,7 @@ entry: define internal i64 @foo(i64 %n, i1 %c1, i1 %c2, i1 %c3, i1 %c4, i1 %c5, i1 %c6, i1 %c7, i1 %c8, i1 %c9, i1 %c10) { ; NOFUNCSPEC-LABEL: define internal range(i64 2, 7) i64 @foo( -; NOFUNCSPEC-SAME: i64 [[N:%.*]], i1 [[C1:%.*]], i1 [[C2:%.*]], i1 [[C3:%.*]], i1 [[C4:%.*]], i1 [[C5:%.*]], i1 [[C6:%.*]], i1 [[C7:%.*]], i1 [[C8:%.*]], i1 [[C9:%.*]], i1 [[C10:%.*]]) { +; NOFUNCSPEC-SAME: i64 range(i64 3, 5) [[N:%.*]], i1 [[C1:%.*]], i1 [[C2:%.*]], i1 [[C3:%.*]], i1 [[C4:%.*]], i1 [[C5:%.*]], i1 [[C6:%.*]], i1 [[C7:%.*]], i1 [[C8:%.*]], i1 [[C9:%.*]], i1 [[C10:%.*]]) { ; NOFUNCSPEC-NEXT: entry: ; NOFUNCSPEC-NEXT: br i1 [[C1]], label [[L1:%.*]], label [[L9:%.*]] ; NOFUNCSPEC: l1: diff --git a/llvm/test/Transforms/InstCombine/fp-floor-ceil.ll b/llvm/test/Transforms/InstCombine/fp-floor-ceil.ll new file mode 100644 index 0000000..c90b7ee --- /dev/null +++ b/llvm/test/Transforms/InstCombine/fp-floor-ceil.ll @@ -0,0 +1,260 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -passes=instcombine -S | FileCheck %s + +define i1 @floor_x_ole(float %x) { +; CHECK-LABEL: @floor_x_ole( +; CHECK-NEXT: [[RET:%.*]] = fcmp ninf ord float [[X:%.*]], 0.000000e+00 +; CHECK-NEXT: ret i1 [[RET]] +; + %floor = call float @llvm.floor.f32(float %x) + %ret = fcmp ninf ole float %floor, %x + ret i1 %ret +} + +define i1 @floor_x_ule(float %x) { +; CHECK-LABEL: @floor_x_ule( +; CHECK-NEXT: ret i1 true +; + %floor = call float @llvm.floor.f32(float %x) + %ret = fcmp ule float %floor, %x + ret i1 %ret +} + +define i1 @floor_x_ogt(float %x) { +; CHECK-LABEL: @floor_x_ogt( +; CHECK-NEXT: ret i1 false +; + %floor = call float @llvm.floor.f32(float %x) + %ret = fcmp ogt float %floor, %x + ret i1 %ret +} + +define i1 @floor_x_ugt(float %x) { +; CHECK-LABEL: @floor_x_ugt( +; CHECK-NEXT: [[RET:%.*]] = fcmp ninf uno float [[X:%.*]], 0.000000e+00 +; CHECK-NEXT: ret i1 [[RET]] +; + %floor = call float @llvm.floor.f32(float %x) + %ret = fcmp ninf ugt float %floor, %x + ret i1 %ret +} + +define i1 @x_floor_oge(float %x) { +; CHECK-LABEL: @x_floor_oge( +; CHECK-NEXT: [[RET:%.*]] = fcmp ninf ord float [[X:%.*]], 0.000000e+00 +; CHECK-NEXT: ret i1 [[RET]] +; + %floor = 
call float @llvm.floor.f32(float %x) + %ret = fcmp ninf oge float %x, %floor + ret i1 %ret +} + +define i1 @x_floor_uge(float %x) { +; CHECK-LABEL: @x_floor_uge( +; CHECK-NEXT: ret i1 true +; + %floor = call float @llvm.floor.f32(float %x) + %ret = fcmp uge float %x, %floor + ret i1 %ret +} + +define i1 @x_floor_olt(float %x) { +; CHECK-LABEL: @x_floor_olt( +; CHECK-NEXT: ret i1 false +; + %floor = call float @llvm.floor.f32(float %x) + %ret = fcmp olt float %x, %floor + ret i1 %ret +} + +define i1 @x_floor_ult(float %x) { +; CHECK-LABEL: @x_floor_ult( +; CHECK-NEXT: [[RET:%.*]] = fcmp ninf uno float [[X:%.*]], 0.000000e+00 +; CHECK-NEXT: ret i1 [[RET]] +; + %floor = call float @llvm.floor.f32(float %x) + %ret = fcmp ninf ult float %x, %floor + ret i1 %ret +} + +define <2 x i1> @x_floor_olt_vec(<2 x float> %x) { +; CHECK-LABEL: @x_floor_olt_vec( +; CHECK-NEXT: ret <2 x i1> zeroinitializer +; + %floor = call <2 x float> @llvm.floor.f32(<2 x float> %x) + %ret = fcmp olt <2 x float> %x, %floor + ret <2 x i1> %ret +} + +define i1 @x_floor_ole_neg(float %x) { +; CHECK-LABEL: @x_floor_ole_neg( +; CHECK-NEXT: [[FLOOR:%.*]] = call float @llvm.floor.f32(float [[X:%.*]]) +; CHECK-NEXT: [[RET:%.*]] = fcmp ole float [[X]], [[FLOOR]] +; CHECK-NEXT: ret i1 [[RET]] +; + %floor = call float @llvm.floor.f32(float %x) + %ret = fcmp ole float %x, %floor + ret i1 %ret +} + +define i1 @x_floor_ogt_neg(float %x) { +; CHECK-LABEL: @x_floor_ogt_neg( +; CHECK-NEXT: [[FLOOR:%.*]] = call float @llvm.floor.f32(float [[X:%.*]]) +; CHECK-NEXT: [[RET:%.*]] = fcmp ogt float [[X]], [[FLOOR]] +; CHECK-NEXT: ret i1 [[RET]] +; + %floor = call float @llvm.floor.f32(float %x) + %ret = fcmp ogt float %x, %floor + ret i1 %ret +} + +define i1 @x_floor_ueq_neg(float %x) { +; CHECK-LABEL: @x_floor_ueq_neg( +; CHECK-NEXT: [[FLOOR:%.*]] = call float @llvm.floor.f32(float [[X:%.*]]) +; CHECK-NEXT: [[RET:%.*]] = fcmp ueq float [[X]], [[FLOOR]] +; CHECK-NEXT: ret i1 [[RET]] +; + %floor = call float @llvm.floor.f32(float %x) + %ret = fcmp ueq float %x, %floor + ret i1 %ret +} + +define i1 @x_floor_une_neg(float %x) { +; CHECK-LABEL: @x_floor_une_neg( +; CHECK-NEXT: [[FLOOR:%.*]] = call float @llvm.floor.f32(float [[X:%.*]]) +; CHECK-NEXT: [[RET:%.*]] = fcmp une float [[X]], [[FLOOR]] +; CHECK-NEXT: ret i1 [[RET]] +; + %floor = call float @llvm.floor.f32(float %x) + %ret = fcmp une float %x, %floor + ret i1 %ret +} + +define i1 @ceil_x_oge(float %x) { +; CHECK-LABEL: @ceil_x_oge( +; CHECK-NEXT: [[RET:%.*]] = fcmp ninf ord float [[X:%.*]], 0.000000e+00 +; CHECK-NEXT: ret i1 [[RET]] +; + %ceil = call float @llvm.ceil.f32(float %x) + %ret = fcmp ninf oge float %ceil, %x + ret i1 %ret +} + +define i1 @ceil_x_uge(float %x) { +; CHECK-LABEL: @ceil_x_uge( +; CHECK-NEXT: ret i1 true +; + %ceil = call float @llvm.ceil.f32(float %x) + %ret = fcmp uge float %ceil, %x + ret i1 %ret +} + +define i1 @ceil_x_olt(float %x) { +; CHECK-LABEL: @ceil_x_olt( +; CHECK-NEXT: ret i1 false +; + %ceil = call float @llvm.ceil.f32(float %x) + %ret = fcmp olt float %ceil, %x + ret i1 %ret +} + +define i1 @ceil_x_ult(float %x) { +; CHECK-LABEL: @ceil_x_ult( +; CHECK-NEXT: [[RET:%.*]] = fcmp ninf uno float [[X:%.*]], 0.000000e+00 +; CHECK-NEXT: ret i1 [[RET]] +; + %ceil = call float @llvm.ceil.f32(float %x) + %ret = fcmp ninf ult float %ceil, %x + ret i1 %ret +} + +define i1 @x_ceil_ole(float %x) { +; CHECK-LABEL: @x_ceil_ole( +; CHECK-NEXT: [[RET:%.*]] = fcmp ninf ord float [[X:%.*]], 0.000000e+00 +; CHECK-NEXT: ret i1 [[RET]] +; + %ceil = call float 
@llvm.ceil.f32(float %x) + %ret = fcmp ninf ole float %x, %ceil + ret i1 %ret +} + +define i1 @x_ceil_ule(float %x) { +; CHECK-LABEL: @x_ceil_ule( +; CHECK-NEXT: ret i1 true +; + %ceil = call float @llvm.ceil.f32(float %x) + %ret = fcmp ule float %x, %ceil + ret i1 %ret +} + +define i1 @x_ceil_ogt(float %x) { +; CHECK-LABEL: @x_ceil_ogt( +; CHECK-NEXT: ret i1 false +; + %ceil = call float @llvm.ceil.f32(float %x) + %ret = fcmp ogt float %x, %ceil + ret i1 %ret +} + +define i1 @x_ceil_ugt(float %x) { +; CHECK-LABEL: @x_ceil_ugt( +; CHECK-NEXT: [[RET:%.*]] = fcmp ninf uno float [[X:%.*]], 0.000000e+00 +; CHECK-NEXT: ret i1 [[RET]] +; + %ceil = call float @llvm.ceil.f32(float %x) + %ret = fcmp ninf ugt float %x, %ceil + ret i1 %ret +} + +define <2 x i1> @x_ceil_ogt_vec(<2 x float> %x) { +; CHECK-LABEL: @x_ceil_ogt_vec( +; CHECK-NEXT: ret <2 x i1> zeroinitializer +; + %ceil = call <2 x float> @llvm.ceil.f32(<2 x float> %x) + %ret = fcmp ogt <2 x float> %x, %ceil + ret <2 x i1> %ret +} + +define i1 @x_ceil_oge_neg(float %x) { +; CHECK-LABEL: @x_ceil_oge_neg( +; CHECK-NEXT: [[CEIL:%.*]] = call float @llvm.ceil.f32(float [[X:%.*]]) +; CHECK-NEXT: [[RET:%.*]] = fcmp oge float [[X]], [[CEIL]] +; CHECK-NEXT: ret i1 [[RET]] +; + %ceil = call float @llvm.ceil.f32(float %x) + %ret = fcmp oge float %x, %ceil + ret i1 %ret +} + +define i1 @x_ceil_olt_neg(float %x) { +; CHECK-LABEL: @x_ceil_olt_neg( +; CHECK-NEXT: [[CEIL:%.*]] = call float @llvm.ceil.f32(float [[X:%.*]]) +; CHECK-NEXT: [[RET:%.*]] = fcmp olt float [[X]], [[CEIL]] +; CHECK-NEXT: ret i1 [[RET]] +; + %ceil = call float @llvm.ceil.f32(float %x) + %ret = fcmp olt float %x, %ceil + ret i1 %ret +} + +define i1 @x_ceil_oeq_neg(float %x) { +; CHECK-LABEL: @x_ceil_oeq_neg( +; CHECK-NEXT: [[CEIL:%.*]] = call float @llvm.ceil.f32(float [[X:%.*]]) +; CHECK-NEXT: [[RET:%.*]] = fcmp oeq float [[X]], [[CEIL]] +; CHECK-NEXT: ret i1 [[RET]] +; + %ceil = call float @llvm.ceil.f32(float %x) + %ret = fcmp oeq float %x, %ceil + ret i1 %ret +} + +define i1 @x_ceil_one_neg(float %x) { +; CHECK-LABEL: @x_ceil_one_neg( +; CHECK-NEXT: [[CEIL:%.*]] = call float @llvm.ceil.f32(float [[X:%.*]]) +; CHECK-NEXT: [[RET:%.*]] = fcmp one float [[X]], [[CEIL]] +; CHECK-NEXT: ret i1 [[RET]] +; + %ceil = call float @llvm.ceil.f32(float %x) + %ret = fcmp one float %x, %ceil + ret i1 %ret +} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll new file mode 100644 index 0000000..9ca6c52 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll @@ -0,0 +1,253 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -p loop-vectorize -force-target-instruction-cost=1 -S %s | FileCheck %s + +target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128-Fn32" +target triple = "arm64-apple-macosx14.0.0" + +define double @test_reduction_costs() { +; CHECK-LABEL: define double @test_reduction_costs() { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi double [ 0.000000e+00, %[[VECTOR_PH]] ], [ [[TMP0:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi double [ 0.000000e+00, %[[VECTOR_PH]] ], 
[ [[TMP1:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0]] = call double @llvm.vector.reduce.fadd.v2f64(double [[VEC_PHI]], <2 x double> <double 3.000000e+00, double 3.000000e+00>) +; CHECK-NEXT: [[TMP1]] = call double @llvm.vector.reduce.fadd.v2f64(double [[VEC_PHI1]], <2 x double> <double 9.000000e+00, double 9.000000e+00>) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi double [ [[TMP0]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX2:%.*]] = phi double [ [[TMP1]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP_1:.*]] +; CHECK: [[LOOP_1]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_1]] ] +; CHECK-NEXT: [[R_1:%.*]] = phi double [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[R_1_NEXT:%.*]], %[[LOOP_1]] ] +; CHECK-NEXT: [[R_2:%.*]] = phi double [ [[BC_MERGE_RDX2]], %[[SCALAR_PH]] ], [ [[R_2_NEXT:%.*]], %[[LOOP_1]] ] +; CHECK-NEXT: [[R_1_NEXT]] = fadd double [[R_1]], 3.000000e+00 +; CHECK-NEXT: [[R_2_NEXT]] = fadd double [[R_2]], 9.000000e+00 +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_1]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[R_1_NEXT_LCSSA:%.*]] = phi double [ [[R_1_NEXT]], %[[LOOP_1]] ], [ [[TMP0]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[R_2_NEXT_LCSSA:%.*]] = phi double [ [[R_2_NEXT]], %[[LOOP_1]] ], [ [[TMP1]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[DIV:%.*]] = fmul double [[R_1_NEXT_LCSSA]], [[R_2_NEXT_LCSSA]] +; CHECK-NEXT: ret double [[DIV]] +; +entry: + br label %loop.1 + +loop.1: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.1 ] + %r.1 = phi double [ 0.000000e+00, %entry ], [ %r.1.next, %loop.1 ] + %r.2 = phi double [ 0.000000e+00, %entry ], [ %r.2.next, %loop.1 ] + %r.1.next = fadd double %r.1, 3.000000e+00 + %r.2.next = fadd double %r.2, 9.000000e+00 + %iv.next = add i64 %iv, 1 + %ec = icmp eq i64 %iv, 1 + br i1 %ec, label %exit, label %loop.1 + +exit: + %div = fmul double %r.1.next, %r.2.next + ret double %div +} + +define void @test_iv_cost(ptr %ptr.start, i8 %a, i64 %b) { +; CHECK-LABEL: define void @test_iv_cost( +; CHECK-SAME: ptr [[PTR_START:%.*]], i8 [[A:%.*]], i64 [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[A_EXT:%.*]] = zext i8 [[A]] to i64 +; CHECK-NEXT: [[START:%.*]] = call i64 @llvm.umin.i64(i64 [[B]], i64 [[A_EXT]]) +; CHECK-NEXT: [[C:%.*]] = icmp eq i64 [[START]], 0 +; CHECK-NEXT: br i1 [[C]], label %[[EXIT:.*]], label %[[ITER_CHECK:.*]] +; CHECK: [[ITER_CHECK]]: +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[START]], 8 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: +; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[START]], 32 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[START]], 32 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[START]], [[N_MOD_VF]] +; CHECK-NEXT: br label 
%[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX1]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX1]], 16 +; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[PTR_START]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i32 16 +; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP6]], align 1 +; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP3]], align 1 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], 32 +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[START]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] +; CHECK: [[VEC_EPILOG_ITER_CHECK]]: +; CHECK-NEXT: [[IND_END6:%.*]] = getelementptr i8, ptr [[PTR_START]], i64 [[N_VEC]] +; CHECK-NEXT: [[IND_END:%.*]] = sub i64 [[START]], [[N_VEC]] +; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[START]], [[N_VEC]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; CHECK: [[VEC_EPILOG_PH]]: +; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[START]], 8 +; CHECK-NEXT: [[N_VEC3:%.*]] = sub i64 [[START]], [[N_MOD_VF2]] +; CHECK-NEXT: [[IND_END1:%.*]] = sub i64 [[START]], [[N_VEC3]] +; CHECK-NEXT: [[IND_END5:%.*]] = getelementptr i8, ptr [[PTR_START]], i64 [[N_VEC3]] +; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; CHECK: [[VEC_EPILOG_VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT10:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR_START]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0 +; CHECK-NEXT: store <8 x i8> zeroinitializer, ptr [[TMP2]], align 1 +; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX]], 8 +; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT10]], [[N_VEC3]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N11:%.*]] = icmp eq i64 [[START]], [[N_VEC3]] +; CHECK-NEXT: br i1 [[CMP_N11]], label %[[EXIT_LOOPEXIT]], label %[[VEC_EPILOG_SCALAR_PH]] +; CHECK: [[VEC_EPILOG_SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END1]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[START]], %[[ITER_CHECK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL7:%.*]] = phi ptr [ [[IND_END5]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END6]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[PTR_START]], %[[ITER_CHECK]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ] +; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ], [ 
[[BC_RESUME_VAL7]], %[[VEC_EPILOG_SCALAR_PH]] ] +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], -1 +; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 1 +; CHECK-NEXT: store i8 0, ptr [[PTR_IV]], align 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 0 +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK: [[EXIT_LOOPEXIT]]: +; CHECK-NEXT: br label %[[EXIT]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + %a.ext = zext i8 %a to i64 + %start = call i64 @llvm.umin.i64(i64 %b, i64 %a.ext) + %c = icmp eq i64 %start, 0 + br i1 %c, label %exit, label %loop + +loop: + %iv = phi i64 [ %start, %entry ], [ %iv.next, %loop ] + %ptr.iv = phi ptr [ %ptr.start, %entry ], [ %ptr.iv.next, %loop ] + %iv.next = add i64 %iv, -1 + %ptr.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 + store i8 0, ptr %ptr.iv, align 1 + %ec = icmp eq i64 %iv.next, 0 + br i1 %ec, label %exit, label %loop + +exit: + ret void +} + +define void @test_exit_branch_cost(ptr %dst, i64 %x, i32 %y, ptr %dst.1, i1 %c.4, ptr %src, ptr %dst.3, i1 %c.3, ptr %dst.2) { +; CHECK-LABEL: define void @test_exit_branch_cost( +; CHECK-SAME: ptr [[DST:%.*]], i64 [[X:%.*]], i32 [[Y:%.*]], ptr [[DST_1:%.*]], i1 [[C_4:%.*]], ptr [[SRC:%.*]], ptr [[DST_3:%.*]], i1 [[C_3:%.*]], ptr [[DST_2:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] +; CHECK: [[LOOP_HEADER]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[C1:%.*]] = icmp eq i64 [[X]], 0 +; CHECK-NEXT: br i1 [[C1]], label %[[THEN_4:.*]], label %[[THEN_1:.*]] +; CHECK: [[THEN_1]]: +; CHECK-NEXT: [[AND32831:%.*]] = and i32 [[Y]], 1 +; CHECK-NEXT: store i64 0, ptr [[DST_1]], align 8 +; CHECK-NEXT: [[C_2:%.*]] = icmp eq i32 [[Y]], 0 +; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[C_4]], i1 [[C_3]], i1 false +; CHECK-NEXT: br i1 [[OR_COND]], label %[[THEN_2:.*]], label %[[ELSE_1:.*]] +; CHECK: [[ELSE_1]]: +; CHECK-NEXT: store i64 0, ptr [[DST_3]], align 8 +; CHECK-NEXT: br label %[[THEN_2]] +; CHECK: [[THEN_2]]: +; CHECK-NEXT: br i1 [[C_3]], label %[[THEN_3:.*]], label %[[LOOP_LATCH]] +; CHECK: [[THEN_3]]: +; CHECK-NEXT: br i1 [[C_4]], label %[[THEN_5:.*]], label %[[ELSE_2:.*]] +; CHECK: [[THEN_4]]: +; CHECK-NEXT: call void @llvm.assume(i1 [[C_4]]) +; CHECK-NEXT: br label %[[THEN_5]] +; CHECK: [[THEN_5]]: +; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ 1, %[[THEN_4]] ], [ 0, %[[THEN_3]] ] +; CHECK-NEXT: store i64 [[TMP0]], ptr [[DST_2]], align 8 +; CHECK-NEXT: br label %[[ELSE_2]] +; CHECK: [[ELSE_2]]: +; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[SRC]], align 8 +; CHECK-NEXT: store i64 [[L]], ptr [[DST]], align 8 +; CHECK-NEXT: br label %[[LOOP_LATCH]] +; CHECK: [[LOOP_LATCH]]: +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 64 +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop.header + +loop.header: + %iv = phi i64 [ %iv.next, %loop.latch ], [ 0, %entry ] + %c1 = icmp eq i64 %x, 0 + br i1 %c1, label %then.4, label %then.1 + +then.1: + %and32831 = and i32 %y, 1 + store i64 0, ptr %dst.1, align 8 + %c.2 = icmp eq i32 %y, 0 + %or.cond = select i1 %c.4, i1 %c.3, i1 false + br i1 %or.cond, label %then.2, label %else.1 + +else.1: ; preds = %then.1 + store i64 0, ptr %dst.3, align 8 + br label %then.2 + +then.2: + br i1 %c.3, label %then.3, label %loop.latch + +then.3: + br i1 %c.4, label %then.5, label 
%else.2 + +then.4: + call void @llvm.assume(i1 %c.4) + br label %then.5 + +then.5: + %1 = phi i64 [ 1, %then.4 ], [ 0, %then.3 ] + store i64 %1, ptr %dst.2, align 8 + br label %else.2 + +else.2: + %l = load i64, ptr %src, align 8 + store i64 %l, ptr %dst, align 8 + br label %loop.latch + +loop.latch: + %iv.next = add i64 %iv, 1 + %ec = icmp eq i64 %iv, 64 + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} + +declare void @llvm.assume(i1 noundef) +declare i64 @llvm.umin.i64(i64, i64) +;. +; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} +; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} +; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} +; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} +; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} +; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]} +;. diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll index 38af580..349fd13 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll @@ -58,6 +58,7 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK-NEXT: vp<%2> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<for.body.preheader>: +; CHECK-NEXT: IR %0 = zext i32 %n to i64 ; CHECK-NEXT: EMIT vp<%2> = EXPAND SCEV (zext i32 %n to i64) ; CHECK-NEXT: No successors ; CHECK-EMPTY: @@ -141,6 +142,7 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK-NEXT: vp<%2> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<for.body.preheader>: +; CHECK-NEXT: IR %0 = zext i32 %n to i64 ; CHECK-NEXT: EMIT vp<%2> = EXPAND SCEV (zext i32 %n to i64) ; CHECK-NEXT: No successors ; CHECK-EMPTY: @@ -260,6 +262,7 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK-NEXT: vp<%2> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<for.body.preheader>: +; CHECK-NEXT: IR %0 = zext i32 %n to i64 ; CHECK-NEXT: EMIT vp<%2> = EXPAND SCEV (zext i32 %n to i64) ; CHECK-NEXT: No successors ; CHECK-EMPTY: @@ -343,6 +346,7 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; CHECK-NEXT: vp<%2> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<for.body.preheader>: +; CHECK-NEXT: IR %0 = zext i32 %n to i64 ; CHECK-NEXT: EMIT vp<%2> = EXPAND SCEV (zext i32 %n to i64) ; CHECK-NEXT: No successors ; CHECK-EMPTY: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll index f14ffe85..11405a1 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll @@ -60,12 +60,11 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: Successor(s): ir-bb<for.end>, scalar.ph ; IF-EVL-INLOOP-EMPTY: ; IF-EVL-INLOOP-NEXT: ir-bb<for.end>: +; IF-EVL-INLOOP-NEXT: IR %add.lcssa = phi i32 [ %add, %for.body ] (extra operand: vp<[[RDX_EX]]>) ; IF-EVL-INLOOP-NEXT: No successors ; IF-EVL-INLOOP-EMPTY: ; IF-EVL-INLOOP-NEXT: scalar.ph: ; IF-EVL-INLOOP-NEXT: No successors -; IF-EVL-INLOOP-EMPTY: -; IF-EVL-INLOOP-NEXT: Live-out i32 %add.lcssa = vp<[[RDX_EX]]> ; 
IF-EVL-INLOOP-NEXT: } ; @@ -100,12 +99,11 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) { ; NO-VP-OUTLOOP-NEXT: Successor(s): ir-bb<for.end>, scalar.ph ; NO-VP-OUTLOOP-EMPTY: ; NO-VP-OUTLOOP-NEXT: ir-bb<for.end>: +; NO-VP-OUTLOOP-NEXT: IR %add.lcssa = phi i32 [ %add, %for.body ] (extra operand: vp<[[RDX_EX]]>) ; NO-VP-OUTLOOP-NEXT: No successors ; NO-VP-OUTLOOP-EMPTY: ; NO-VP-OUTLOOP-NEXT: scalar.ph: ; NO-VP-OUTLOOP-NEXT: No successors -; NO-VP-OUTLOOP-EMPTY: -; NO-VP-OUTLOOP-NEXT: Live-out i32 %add.lcssa = vp<[[RDX_EX]]> ; NO-VP-OUTLOOP-NEXT: } ; @@ -140,12 +138,11 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) { ; NO-VP-INLOOP-NEXT: Successor(s): ir-bb<for.end>, scalar.ph ; NO-VP-INLOOP-EMPTY: ; NO-VP-INLOOP-NEXT: ir-bb<for.end>: +; NO-VP-INLOOP-NEXT: IR %add.lcssa = phi i32 [ %add, %for.body ] (extra operand: vp<[[RDX_EX]]>) ; NO-VP-INLOOP-NEXT: No successors ; NO-VP-INLOOP-EMPTY: ; NO-VP-INLOOP-NEXT: scalar.ph: ; NO-VP-INLOOP-NEXT: No successors -; NO-VP-INLOOP-EMPTY: -; NO-VP-INLOOP-NEXT: Live-out i32 %add.lcssa = vp<[[RDX_EX]]> ; NO-VP-INLOOP-NEXT: } ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll index 69b8519..45545fe 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll @@ -227,6 +227,7 @@ define i32 @sink_replicate_region_3_reduction(i32 %x, i8 %y, ptr %ptr) optsize { ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<exit> +; CHECK-NEXT: IR %res = phi i32 [ %and.red.next, %loop ] (extra operand: vp<[[RED_EX]]>) ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph @@ -234,7 +235,6 @@ define i32 @sink_replicate_region_3_reduction(i32 %x, i8 %y, ptr %ptr) optsize { ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: Live-out i32 %recur = vp<[[RESUME_1_P]]> -; CHECK-NEXT: Live-out i32 %res = vp<[[RED_EX]]> ; CHECK-NEXT: } ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/opaque-ptr.ll b/llvm/test/Transforms/LoopVectorize/opaque-ptr.ll index e1770ac..13e79a4 100644 --- a/llvm/test/Transforms/LoopVectorize/opaque-ptr.ll +++ b/llvm/test/Transforms/LoopVectorize/opaque-ptr.ll @@ -1,9 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -S -passes=loop-vectorize -force-vector-width=2 < %s | FileCheck %s -; TODO: This still crashes with inbounds on the GEPs. 
-define void @test(ptr %p1.start, ptr %p2.start, ptr %p1.end) { -; CHECK-LABEL: @test( +define void @test_ptr_iv_no_inbounds(ptr %p1.start, ptr %p2.start, ptr %p1.end) { +; CHECK-LABEL: @test_ptr_iv_no_inbounds( ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: @@ -39,6 +38,96 @@ exit: ret void } +define void @test_ptr_iv_with_inbounds(ptr %p1.start, ptr %p2.start, ptr %p1.end) { +; CHECK-LABEL: @test_ptr_iv_with_inbounds( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[P1_START5:%.*]] = ptrtoint ptr [[P1_START:%.*]] to i64 +; CHECK-NEXT: [[P1_END4:%.*]] = ptrtoint ptr [[P1_END:%.*]] to i64 +; CHECK-NEXT: [[P1_START2:%.*]] = ptrtoint ptr [[P1_START]] to i64 +; CHECK-NEXT: [[P1_END1:%.*]] = ptrtoint ptr [[P1_END]] to i64 +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[P1_END4]], -4 +; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[P1_START5]] +; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 2 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] +; CHECK: vector.memcheck: +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[P1_END1]], -4 +; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP4]], [[P1_START2]] +; CHECK-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP5]], 2 +; CHECK-NEXT: [[TMP7:%.*]] = shl nuw i64 [[TMP6]], 2 +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], 4 +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P1_START]], i64 [[TMP8]] +; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[P2_START:%.*]], i64 [[TMP8]] +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[P1_START]], [[SCEVGEP3]] +; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[P2_START]], [[SCEVGEP]] +; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] +; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 2 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[N_VEC]], 4 +; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[P1_START]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[N_VEC]], 4 +; CHECK-NEXT: [[IND_END6:%.*]] = getelementptr i8, ptr [[P2_START]], i64 [[TMP10]] +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], 0 +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[P1_START]], i64 [[TMP11]] +; CHECK-NEXT: [[OFFSET_IDX8:%.*]] = mul i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX8]], 0 +; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[P2_START]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[NEXT_GEP]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP13]], align 4, !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[NEXT_GEP9]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <2 x float>, ptr [[TMP14]], align 4, !alias.scope [[META3]] +; CHECK-NEXT: [[TMP15:%.*]] = fadd <2 x float> [[WIDE_LOAD]], [[WIDE_LOAD10]] +; CHECK-NEXT: store <2 x float> [[TMP15]], ptr [[TMP13]], align 4, !alias.scope [[META0]], !noalias [[META3]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 
[[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[P1_START]], [[ENTRY:%.*]] ], [ [[P1_START]], [[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL7:%.*]] = phi ptr [ [[IND_END6]], [[MIDDLE_BLOCK]] ], [ [[P2_START]], [[ENTRY]] ], [ [[P2_START]], [[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: [[P1:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[P1_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[BC_RESUME_VAL7]], [[SCALAR_PH]] ], [ [[P2_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[P1_VAL:%.*]] = load float, ptr [[P1]], align 4 +; CHECK-NEXT: [[P2_VAL:%.*]] = load float, ptr [[P2]], align 4 +; CHECK-NEXT: [[SUM:%.*]] = fadd float [[P1_VAL]], [[P2_VAL]] +; CHECK-NEXT: store float [[SUM]], ptr [[P1]], align 4 +; CHECK-NEXT: [[P1_NEXT]] = getelementptr inbounds float, ptr [[P1]], i64 1 +; CHECK-NEXT: [[P2_NEXT]] = getelementptr inbounds float, ptr [[P2]], i64 1 +; CHECK-NEXT: [[C:%.*]] = icmp ne ptr [[P1_NEXT]], [[P1_END]] +; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK: exit: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %p1 = phi ptr [ %p1.start, %entry ], [ %p1.next, %loop ] + %p2 = phi ptr [ %p2.start, %entry ], [ %p2.next, %loop ] + %p1.val = load float, ptr %p1 + %p2.val = load float, ptr %p2 + %sum = fadd float %p1.val, %p2.val + store float %sum, ptr %p1 + %p1.next = getelementptr inbounds float, ptr %p1, i64 1 + %p2.next = getelementptr inbounds float, ptr %p2, i64 1 + %c = icmp ne ptr %p1.next, %p1.end + br i1 %c, label %loop, label %exit + +exit: + ret void +} + define void @store_pointer_induction(ptr %start, ptr %end) { ; CHECK-LABEL: @store_pointer_induction( ; CHECK-NEXT: entry: @@ -66,7 +155,7 @@ define void @store_pointer_induction(ptr %start, ptr %end) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 16 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] @@ -78,7 +167,7 @@ define void @store_pointer_induction(ptr %start, ptr %end) { ; CHECK-NEXT: store ptr [[IV]], ptr [[IV]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = getelementptr inbounds ptr, ptr [[IV]], i32 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq ptr [[IV_NEXT]], [[END]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/pr36983.ll b/llvm/test/Transforms/LoopVectorize/pr36983.ll index 2068945..7e38d60 100644 --- a/llvm/test/Transforms/LoopVectorize/pr36983.ll +++ b/llvm/test/Transforms/LoopVectorize/pr36983.ll @@ -3,8 +3,8 @@ ; There could be more than one LCSSA 
PHIs in loop exit block. ; CHECK-LABEL: bb1.bb3_crit_edge: -; CHECK: %_tmp133.lcssa1 = phi i16 [ %_tmp133, %bb2 ], [ %vector.recur.extract.for.phi1, %middle.block ] -; CHECK: %_tmp133.lcssa = phi i16 [ %_tmp133, %bb2 ], [ %vector.recur.extract.for.phi, %middle.block ] +; CHECK: %_tmp133.lcssa1 = phi i16 [ %_tmp133, %bb2 ], [ %vector.recur.extract.for.phi, %middle.block ] +; CHECK: %_tmp133.lcssa = phi i16 [ %_tmp133, %bb2 ], [ %vector.recur.extract.for.phi1, %middle.block ] define void @f1() { bb2.lr.ph: diff --git a/llvm/test/Transforms/LoopVectorize/pr45259.ll b/llvm/test/Transforms/LoopVectorize/pr45259.ll index dcc8f3f..0089716 100644 --- a/llvm/test/Transforms/LoopVectorize/pr45259.ll +++ b/llvm/test/Transforms/LoopVectorize/pr45259.ll @@ -14,11 +14,12 @@ define i8 @widget(ptr %arr, i8 %t9) { ; CHECK-NEXT: br i1 [[C]], label [[FOR_PREHEADER:%.*]], label [[BB6]] ; CHECK: for.preheader: ; CHECK-NEXT: [[T1_0_LCSSA:%.*]] = phi ptr [ [[T1_0]], [[BB6]] ] -; CHECK-NEXT: [[T1_0_LCSSA2:%.*]] = ptrtoint ptr [[T1_0_LCSSA]] to i64 ; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[ARR1]] to i32 ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[TMP0]] -; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[T1_0_LCSSA2]] to i32 +; CHECK-NEXT: [[T1_0_LCSSA3:%.*]] = ptrtoint ptr [[T1_0_LCSSA]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[T1_0_LCSSA3]] to i32 ; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[T1_0_LCSSA2:%.*]] = ptrtoint ptr [[T1_0_LCSSA]] to i64 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP3]], 4 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] ; CHECK: vector.scevcheck: diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll index e4984f52..431d14b 100644 --- a/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll +++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll @@ -14,6 +14,7 @@ define void @test_tc_less_than_16(ptr %A, i64 %N) { ; CHECK-NEXT: vp<[[TC:%.+]]> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: IR %and = and i64 %N, 15 ; CHECK-NEXT: EMIT vp<[[TC]]> = EXPAND SCEV (zext i4 (trunc i64 %N to i4) to i64) ; CHECK-NEXT: No successors ; CHECK-EMPTY: @@ -55,6 +56,7 @@ define void @test_tc_less_than_16(ptr %A, i64 %N) { ; CHECK-NEXT: vp<[[TC:%.+]]> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: IR %and = and i64 %N, 15 ; CHECK-NEXT: EMIT vp<[[TC]]> = EXPAND SCEV (zext i4 (trunc i64 %N to i4) to i64) ; CHECK-NEXT: No successors ; CHECK-EMPTY: diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing.ll index dd2b724..26974c2 100644 --- a/llvm/test/Transforms/LoopVectorize/vplan-printing.ll +++ b/llvm/test/Transforms/LoopVectorize/vplan-printing.ll @@ -161,12 +161,11 @@ define float @print_reduction(i64 %n, ptr noalias %y) { ; CHECK-NEXT: Successor(s): ir-bb<for.end>, scalar.ph ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<for.end> +; CHECK-NEXT: IR %red.next.lcssa = phi float [ %red.next, %for.body ] (extra operand: vp<[[RED_EX]]>) ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph ; CHECK-NEXT: No successors -; CHECK-EMPTY: -; CHECK-NEXT: Live-out float %red.next.lcssa = vp<[[RED_EX]]> ; CHECK-NEXT: } ; entry: @@ -444,12 +443,11 @@ define float @print_fmuladd_strict(ptr %a, ptr %b, i64 %n) { ; CHECK-NEXT: Successor(s): ir-bb<for.end>, scalar.ph ; CHECK-EMPTY: ; 
CHECK-NEXT: ir-bb<for.end> +; CHECK-NEXT: IR %muladd.lcssa = phi float [ %muladd, %for.body ] (extra operand: vp<[[RED_EX]]>) ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph ; CHECK-NEXT: No successors -; CHECK-EMPTY: -; CHECK-NEXT: Live-out float %muladd.lcssa = vp<[[RED_EX]]> ; CHECK-NEXT:} entry: @@ -577,6 +575,8 @@ define void @print_expand_scev(i64 %y, ptr %ptr) { ; CHECK-NEXT: vp<[[TC:%.+]]> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: IR %div = udiv i64 %y, 492802768830814060 +; CHECK-NEXT: IR %inc = add i64 %div, 1 ; CHECK-NEXT: EMIT vp<[[TC]]> = EXPAND SCEV (1 + ((15 + (%y /u 492802768830814060))<nuw><nsw> /u (1 + (%y /u 492802768830814060))<nuw><nsw>))<nuw><nsw> ; CHECK-NEXT: EMIT vp<[[EXP_SCEV:%.+]]> = EXPAND SCEV (1 + (%y /u 492802768830814060))<nuw><nsw> ; CHECK-NEXT: No successors @@ -666,12 +666,11 @@ define i32 @print_exit_value(ptr %ptr, i32 %off) { ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<exit> +; CHECK-NEXT: IR %lcssa = phi i32 [ %add, %loop ] (extra operand: vp<[[EXIT]]>) ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph ; CHECK-NEXT: No successors -; CHECK-EMPTY: -; CHECK-NEXT: Live-out i32 %lcssa = vp<[[EXIT]]> ; CHECK-NEXT: } ; entry: @@ -1037,6 +1036,7 @@ define i16 @print_first_order_recurrence_and_result(ptr %ptr) { ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<exit> +; CHECK-NEXT: IR %for.1.lcssa = phi i16 [ %for.1, %loop ] (extra operand: vp<[[FOR_RESULT]]>) ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph @@ -1044,7 +1044,6 @@ define i16 @print_first_order_recurrence_and_result(ptr %ptr) { ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: Live-out i16 %for.1 = vp<[[RESUME_P]]> -; CHECK-NEXT: Live-out i16 %for.1.lcssa = vp<[[FOR_RESULT]]> ; CHECK-NEXT: } ; entry: diff --git a/llvm/test/Transforms/SCCP/ip-add-range-to-call.ll b/llvm/test/Transforms/SCCP/ip-add-range-to-call.ll index 91efbcc..14a5900 100644 --- a/llvm/test/Transforms/SCCP/ip-add-range-to-call.ll +++ b/llvm/test/Transforms/SCCP/ip-add-range-to-call.ll @@ -6,7 +6,7 @@ ; can be added to call sites. define internal i32 @callee(i32 %x) { ; CHECK-LABEL: define internal range(i32 0, 21) i32 @callee( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 range(i32 0, 21) [[X:%.*]]) { ; CHECK-NEXT: ret i32 [[X]] ; ret i32 %x @@ -133,7 +133,7 @@ define void @caller_cb3() { ; should be added at call sites. 
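; (The range attributes in the updated checks use LLVM's half-open
; [Lo, Hi) convention: range(i32 10, 21) on %x below means 10 <= %x <= 20,
; matching the interval comments in these tests, e.g. "Constant range for
; %a is [1, 48)" pairing with range(i32 1, 48).)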
define internal i32 @callee5(i32 %x, i32 %y) { ; CHECK-LABEL: define internal i32 @callee5( -; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-SAME: i32 range(i32 10, 21) [[X:%.*]], i32 range(i32 100, 201) [[Y:%.*]]) { ; CHECK-NEXT: [[C:%.*]] = icmp slt i32 [[X]], 15 ; CHECK-NEXT: br i1 [[C]], label [[BB1:%.*]], label [[BB2:%.*]] ; CHECK: bb1: diff --git a/llvm/test/Transforms/SCCP/ip-constant-ranges.ll b/llvm/test/Transforms/SCCP/ip-constant-ranges.ll index c0cdfaf..618c6f6 100644 --- a/llvm/test/Transforms/SCCP/ip-constant-ranges.ll +++ b/llvm/test/Transforms/SCCP/ip-constant-ranges.ll @@ -4,7 +4,7 @@ ; Constant range for %a is [1, 48) and for %b is [301, 1000) define internal i32 @f1(i32 %a, i32 %b) { ; CHECK-LABEL: define {{[^@]+}}@f1 -; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]]) { +; CHECK-SAME: (i32 range(i32 1, 48) [[A:%.*]], i32 range(i32 301, 1000) [[B:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: ret i32 poison ; @@ -27,7 +27,7 @@ entry: ; Constant range for %x is [47, 302) define internal i32 @f2(i32 %x) { ; CHECK-LABEL: define {{[^@]+}}@f2 -; CHECK-SAME: (i32 [[X:%.*]]) { +; CHECK-SAME: (i32 range(i32 47, 302) [[X:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 300 ; CHECK-NEXT: [[CMP4:%.*]] = icmp ugt i32 [[X]], 300 @@ -79,7 +79,7 @@ entry: define internal i32 @f3(i32 %x) { ; CHECK-LABEL: define {{[^@]+}}@f3 -; CHECK-SAME: (i32 [[X:%.*]]) { +; CHECK-SAME: (i32 range(i32 0, 2) [[X:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: ret i32 poison ; @@ -116,7 +116,7 @@ end: define internal i32 @f4(i32 %x) { ; CHECK-LABEL: define {{[^@]+}}@f4 -; CHECK-SAME: (i32 [[X:%.*]]) { +; CHECK-SAME: (i32 range(i32 301, -2147483648) [[X:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: ret i32 poison ; @@ -170,7 +170,7 @@ entry: define internal i1 @test_unreachable_callee(i32 %a) { ; CHECK-LABEL: define {{[^@]+}}@test_unreachable_callee -; CHECK-SAME: (i32 [[A:%.*]]) { +; CHECK-SAME: (i32 range(i32 1, 3) [[A:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: ret i1 poison ; @@ -199,7 +199,7 @@ define double @test_struct({ double, double } %test) { ; Constant range for %x is [47, 302) define internal i32 @f5(i32 %x) { ; CHECK-LABEL: define {{[^@]+}}@f5 -; CHECK-SAME: (i32 [[X:%.*]]) { +; CHECK-SAME: (i32 range(i32 47, 302) [[X:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], undef ; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 undef, [[X]] @@ -282,7 +282,7 @@ entry: define internal i32 @callee6.1(i32 %i) { ; CHECK-LABEL: define {{[^@]+}}@callee6.1 -; CHECK-SAME: (i32 [[I:%.*]]) { +; CHECK-SAME: (i32 range(i32 30, 44) [[I:%.*]]) { ; CHECK-NEXT: [[RES:%.*]] = call i32 @callee6.2(i32 [[I]]) ; CHECK-NEXT: ret i32 poison ; @@ -292,7 +292,7 @@ define internal i32 @callee6.1(i32 %i) { define internal i32 @callee6.2(i32 %i) { ; CHECK-LABEL: define {{[^@]+}}@callee6.2 -; CHECK-SAME: (i32 [[I:%.*]]) { +; CHECK-SAME: (i32 range(i32 30, 44) [[I:%.*]]) { ; CHECK-NEXT: br label [[IF_THEN:%.*]] ; CHECK: if.then: ; CHECK-NEXT: ret i32 poison diff --git a/llvm/test/Transforms/SCCP/ip-ranges-casts.ll b/llvm/test/Transforms/SCCP/ip-ranges-casts.ll index e8d4175..d9dc045 100644 --- a/llvm/test/Transforms/SCCP/ip-ranges-casts.ll +++ b/llvm/test/Transforms/SCCP/ip-ranges-casts.ll @@ -4,7 +4,7 @@ ; x = [100, 301) define internal i1 @f.trunc(i32 %x) { ; CHECK-LABEL: define internal i1 @f.trunc( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 range(i32 100, 301) [[X:%.*]]) { ; CHECK-NEXT: [[T_1:%.*]] = trunc nuw nsw i32 [[X]] to i16 ; CHECK-NEXT: [[C_2:%.*]] = icmp sgt i16 
[[T_1]], 299 ; CHECK-NEXT: [[C_4:%.*]] = icmp slt i16 [[T_1]], 101 @@ -60,7 +60,7 @@ define i1 @caller1() { ; x = [100, 301) define internal i1 @f.zext(i32 %x, i32 %y) { ; CHECK-LABEL: define internal i1 @f.zext( -; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-SAME: i32 range(i32 100, 301) [[X:%.*]], i32 range(i32 -120, 901) [[Y:%.*]]) { ; CHECK-NEXT: [[T_1:%.*]] = zext nneg i32 [[X]] to i64 ; CHECK-NEXT: [[C_2:%.*]] = icmp sgt i64 [[T_1]], 299 ; CHECK-NEXT: [[C_4:%.*]] = icmp slt i64 [[T_1]], 101 @@ -114,7 +114,7 @@ define i1 @caller.zext() { ; x = [100, 301) define internal i1 @f.sext(i32 %x, i32 %y) { ; CHECK-LABEL: define internal i1 @f.sext( -; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-SAME: i32 range(i32 100, 301) [[X:%.*]], i32 range(i32 -120, 901) [[Y:%.*]]) { ; CHECK-NEXT: [[T_1:%.*]] = zext nneg i32 [[X]] to i64 ; CHECK-NEXT: [[C_2:%.*]] = icmp sgt i64 [[T_1]], 299 ; CHECK-NEXT: [[C_4:%.*]] = icmp slt i64 [[T_1]], 101 @@ -166,7 +166,7 @@ define i1 @caller.sext() { ; There's nothing we can do besides going to the full range or overdefined. define internal i1 @f.fptosi(i32 %x) { ; CHECK-LABEL: define internal i1 @f.fptosi( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 range(i32 100, 301) [[X:%.*]]) { ; CHECK-NEXT: [[TO_DOUBLE:%.*]] = uitofp nneg i32 [[X]] to double ; CHECK-NEXT: [[ADD:%.*]] = fadd double 0.000000e+00, [[TO_DOUBLE]] ; CHECK-NEXT: [[TO_I32:%.*]] = fptosi double [[ADD]] to i32 @@ -208,7 +208,7 @@ define i1 @caller.fptosi() { ; There's nothing we can do besides going to the full range or overdefined. define internal i1 @f.fpext(i16 %x) { ; CHECK-LABEL: define internal i1 @f.fpext( -; CHECK-SAME: i16 [[X:%.*]]) { +; CHECK-SAME: i16 range(i16 100, 301) [[X:%.*]]) { ; CHECK-NEXT: [[TO_FLOAT:%.*]] = uitofp nneg i16 [[X]] to float ; CHECK-NEXT: [[TO_DOUBLE:%.*]] = fpext float [[TO_FLOAT]] to double ; CHECK-NEXT: [[TO_I64:%.*]] = fptoui float [[TO_FLOAT]] to i64 @@ -251,7 +251,7 @@ define i1 @caller.fpext() { ; There's nothing we can do besides going to the full range or overdefined. 
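; (Even though range(i64 100, 301) is now attached to %x, the value that
; reaches the compares has been laundered through an inttoptr/ptrtoint
; round trip, so the icmps in the body below survive instead of folding.)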
define internal i1 @f.inttoptr.ptrtoint(i64 %x) { ; CHECK-LABEL: define internal i1 @f.inttoptr.ptrtoint( -; CHECK-SAME: i64 [[X:%.*]]) { +; CHECK-SAME: i64 range(i64 100, 301) [[X:%.*]]) { ; CHECK-NEXT: [[TO_PTR:%.*]] = inttoptr i64 [[X]] to ptr ; CHECK-NEXT: [[TO_I64:%.*]] = ptrtoint ptr [[TO_PTR]] to i64 ; CHECK-NEXT: [[C_1:%.*]] = icmp sgt i64 [[TO_I64]], 300 @@ -325,7 +325,7 @@ entry: define internal i64 @f.sext_to_zext(i32 %t) { ; CHECK-LABEL: define internal range(i64 0, 2) i64 @f.sext_to_zext( -; CHECK-SAME: i32 [[T:%.*]]) { +; CHECK-SAME: i32 range(i32 0, 2) [[T:%.*]]) { ; CHECK-NEXT: [[A:%.*]] = zext nneg i32 [[T]] to i64 ; CHECK-NEXT: ret i64 [[A]] ; diff --git a/llvm/test/Transforms/SCCP/ip-ranges-phis.ll b/llvm/test/Transforms/SCCP/ip-ranges-phis.ll index 4fb73c5..5db2704 100644 --- a/llvm/test/Transforms/SCCP/ip-ranges-phis.ll +++ b/llvm/test/Transforms/SCCP/ip-ranges-phis.ll @@ -3,7 +3,7 @@ define internal i32 @f1(i32 %x) { ; CHECK-LABEL: define {{[^@]+}}@f1 -; CHECK-SAME: (i32 [[X:%.*]]) { +; CHECK-SAME: (i32 range(i32 0, 2) [[X:%.*]]) { ; CHECK-NEXT: ret i32 poison ; %cmp = icmp sgt i32 %x, 300 @@ -40,7 +40,7 @@ end: define internal i32 @f2(i32 %x, i32 %y, i32 %z, i1 %cmp.1, i1 %cmp.2) { ; CHECK-LABEL: define {{[^@]+}}@f2 -; CHECK-SAME: (i32 [[X:%.*]], i32 [[Y:%.*]], i32 [[Z:%.*]], i1 [[CMP_1:%.*]], i1 [[CMP_2:%.*]]) { +; CHECK-SAME: (i32 range(i32 0, 2) [[X:%.*]], i32 range(i32 -10, 2) [[Y:%.*]], i32 range(i32 1, 11) [[Z:%.*]], i1 [[CMP_1:%.*]], i1 [[CMP_2:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 [[CMP_1]], label [[IF_TRUE_1:%.*]], label [[END:%.*]] ; CHECK: if.true.1: @@ -133,7 +133,7 @@ end: define internal i32 @f3(i32 %x, i32 %y, i1 %cmp.1) { ; CHECK-LABEL: define {{[^@]+}}@f3 -; CHECK-SAME: (i32 [[X:%.*]], i32 [[Y:%.*]], i1 [[CMP_1:%.*]]) { +; CHECK-SAME: (i32 range(i32 0, 6) [[X:%.*]], i32 [[Y:%.*]], i1 [[CMP_1:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 [[CMP_1]], label [[IF_TRUE_1:%.*]], label [[END:%.*]] ; CHECK: if.true.1: diff --git a/llvm/test/Transforms/SCCP/ip-ranges-select.ll b/llvm/test/Transforms/SCCP/ip-ranges-select.ll index 7a507ea..6ce46af 100644 --- a/llvm/test/Transforms/SCCP/ip-ranges-select.ll +++ b/llvm/test/Transforms/SCCP/ip-ranges-select.ll @@ -18,7 +18,7 @@ define void @caller.1(ptr %arg) { define internal i32 @callee.1(i32 %arg) { ; CHECK-LABEL: define {{[^@]+}}@callee.1 -; CHECK-SAME: (i32 [[ARG:%.*]]) { +; CHECK-SAME: (i32 range(i32 2, 5) [[ARG:%.*]]) { ; CHECK-NEXT: [[SEL:%.*]] = select i1 false, i32 16, i32 [[ARG]] ; CHECK-NEXT: br label [[BB10:%.*]] ; CHECK: bb10: @@ -40,7 +40,7 @@ declare void @use(i32) define internal i1 @f1(i32 %x, i32 %y, i1 %cmp) { ; CHECK-LABEL: define {{[^@]+}}@f1 -; CHECK-SAME: (i32 [[X:%.*]], i32 [[Y:%.*]], i1 [[CMP:%.*]]) { +; CHECK-SAME: (i32 range(i32 10, 21) [[X:%.*]], i32 range(i32 100, 201) [[Y:%.*]], i1 [[CMP:%.*]]) { ; CHECK-NEXT: [[SEL_1:%.*]] = select i1 [[CMP]], i32 [[X]], i32 [[Y]] ; CHECK-NEXT: [[C_2:%.*]] = icmp sgt i32 [[SEL_1]], 100 ; CHECK-NEXT: [[C_3:%.*]] = icmp eq i32 [[SEL_1]], 50 diff --git a/llvm/test/Transforms/SCCP/musttail-call.ll b/llvm/test/Transforms/SCCP/musttail-call.ll index 085662a..d5e0dfd0 100644 --- a/llvm/test/Transforms/SCCP/musttail-call.ll +++ b/llvm/test/Transforms/SCCP/musttail-call.ll @@ -71,7 +71,7 @@ define internal ptr @no_side_effects(i8 %v) readonly nounwind willreturn { ; return value should stay as it is, and should not be zapped. 
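; (range(i8 2, 0) in the updated check below is a wrapped range: under the
; half-open [Lo, Hi) convention it covers every i8 value except 0 and 1.)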
define internal ptr @dont_zap_me(i8 %v) { ; CHECK-LABEL: define {{[^@]+}}@dont_zap_me -; CHECK-SAME: (i8 [[V:%.*]]) { +; CHECK-SAME: (i8 range(i8 2, 0) [[V:%.*]]) { ; CHECK-NEXT: [[I1:%.*]] = call i32 @external() ; CHECK-NEXT: ret ptr null ; diff --git a/llvm/test/Transforms/SCCP/pointer-nonnull.ll b/llvm/test/Transforms/SCCP/pointer-nonnull.ll index 9eb3a45..6a82779 100644 --- a/llvm/test/Transforms/SCCP/pointer-nonnull.ll +++ b/llvm/test/Transforms/SCCP/pointer-nonnull.ll @@ -248,9 +248,13 @@ define ptr @ret_maybe_null_pointer(ptr %p) { } define internal void @ip_nonnull_arg_callee(ptr %p) { -; CHECK-LABEL: define internal void @ip_nonnull_arg_callee( -; CHECK-SAME: ptr [[P:%.*]]) { -; CHECK-NEXT: ret void +; SCCP-LABEL: define internal void @ip_nonnull_arg_callee( +; SCCP-SAME: ptr [[P:%.*]]) { +; SCCP-NEXT: ret void +; +; IPSCCP-LABEL: define internal void @ip_nonnull_arg_callee( +; IPSCCP-SAME: ptr nonnull [[P:%.*]]) { +; IPSCCP-NEXT: ret void ; ret void } diff --git a/llvm/test/Transforms/SCCP/resolvedundefsin-tracked-fn.ll b/llvm/test/Transforms/SCCP/resolvedundefsin-tracked-fn.ll index 88e9e65..07913cd 100644 --- a/llvm/test/Transforms/SCCP/resolvedundefsin-tracked-fn.ll +++ b/llvm/test/Transforms/SCCP/resolvedundefsin-tracked-fn.ll @@ -56,7 +56,7 @@ entry: define internal i1 @test1_g(ptr %h, i32 %i) #0 { ; CHECK-LABEL: define {{[^@]+}}@test1_g -; CHECK-SAME: (ptr [[H:%.*]], i32 [[I:%.*]]) { +; CHECK-SAME: (ptr [[H:%.*]], i32 range(i32 0, 2) [[I:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[I]], 0 ; CHECK-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]] @@ -221,7 +221,7 @@ exit: define internal i1 @test3_g(ptr %h, i32 %i) { ; CHECK-LABEL: define {{[^@]+}}@test3_g -; CHECK-SAME: (ptr [[H:%.*]], i32 [[I:%.*]]) { +; CHECK-SAME: (ptr [[H:%.*]], i32 range(i32 0, 2) [[I:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[I]], 0 ; CHECK-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]] diff --git a/llvm/test/Transforms/SCCP/switch.ll b/llvm/test/Transforms/SCCP/switch.ll index 5208213..fb81213 100644 --- a/llvm/test/Transforms/SCCP/switch.ll +++ b/llvm/test/Transforms/SCCP/switch.ll @@ -207,7 +207,7 @@ switch.2: ; range information. 
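; (The deduced [1, 4) range now appears twice below: as the pre-existing
; return attribute on @test_ip_range, and newly on the %x parameter.)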
define internal i32 @test_ip_range(i32 %x) { ; CHECK-LABEL: define internal range(i32 1, 4) i32 @test_ip_range( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 range(i32 1, 4) [[X:%.*]]) { ; CHECK-NEXT: switch i32 [[X]], label [[DEFAULT_UNREACHABLE:%.*]] [ ; CHECK-NEXT: i32 3, label [[SWITCH_3:%.*]] ; CHECK-NEXT: i32 1, label [[SWITCH_1:%.*]] diff --git a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-user-not-min.ll b/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-user-not-min.ll index 50b19d0..6922df8 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-user-not-min.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-user-not-min.ll @@ -6,10 +6,10 @@ define void @test(ptr %block, ptr noalias %pixels, i1 %b) { ; CHECK-SAME: ptr [[BLOCK:%.*]], ptr noalias [[PIXELS:%.*]], i1 [[B:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i1> <i1 true, i1 poison, i1 false, i1 false>, i1 [[B]], i32 1 +; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i1> [[TMP0]] to <4 x i8> ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr [[BLOCK]], align 2 ; CHECK-NEXT: [[TMP3:%.*]] = icmp ult <4 x i16> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = trunc <4 x i16> [[TMP2]] to <4 x i8> -; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i1> [[TMP0]] to <4 x i8> ; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[TMP3]], <4 x i8> [[TMP4]], <4 x i8> [[TMP1]] ; CHECK-NEXT: store <4 x i8> [[TMP5]], ptr [[PIXELS]], align 1 ; CHECK-NEXT: ret void diff --git a/llvm/test/Transforms/SLPVectorizer/X86/trunc-node-reused.ll b/llvm/test/Transforms/SLPVectorizer/X86/trunc-node-reused.ll new file mode 100644 index 0000000..4b62ef6 --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/X86/trunc-node-reused.ll @@ -0,0 +1,74 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s + +define i16 @test() { +; CHECK-LABEL: define i16 @test() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call <4 x i1> @llvm.vector.insert.v4i1.v2i1(<4 x i1> <i1 false, i1 false, i1 poison, i1 poison>, <2 x i1> zeroinitializer, i64 2) +; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i1> zeroinitializer, [[TMP0]] +; CHECK-NEXT: [[TMP2:%.*]] = xor <4 x i1> zeroinitializer, [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i1> [[TMP3]] to <4 x i64> +; CHECK-NEXT: [[TMP5:%.*]] = icmp slt <4 x i64> [[TMP4]], zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = or <4 x i1> zeroinitializer, [[TMP1]] +; CHECK-NEXT: [[TMP7:%.*]] = select <4 x i1> [[TMP5]], <4 x i1> zeroinitializer, <4 x i1> [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = sext <4 x i1> [[TMP7]] to <4 x i16> +; CHECK-NEXT: [[TMP9:%.*]] = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> [[TMP8]]) +; CHECK-NEXT: ret i16 [[TMP9]] +; +entry: + %conv73 = xor i64 0, 0 + %and.i = and i64 0, 0 + %xor2.i = or i64 %and.i, 0 + %sub.i = or i64 %xor2.i, 0 + %xor3.i = xor i64 %sub.i, %conv73 + %and4.i = and i64 %xor3.i, 0 + %cmp.i = icmp slt i64 %and4.i, 0 + %0 = trunc i64 %conv73 to i16 + %1 = or i16 0, %0 + %conv73i = xor i64 0, 0 + %andi.i = and i64 0, 0 + %xor2i.i = or i64 %andi.i, 0 + %subi.i = or i64 %xor2i.i, 0 + %xor3i.i = xor i64 %subi.i, %conv73i + %and4i.i = and i64 %xor3i.i, 0 + %cmpi.i = icmp slt i64 %and4i.i, 0 + %2 = trunc i64 %conv73i to i16 + %3 = or i16 0, %2 + %4 = select i1 %cmpi.i, i16 0, i16 %3 + %5 = select i1 %cmp.i, i16 0, i16 %1 + %6 = zext i32 0 to i64 + %add.ip 
= or i64 %6, 0 + %orp = or i64 %add.ip, 0 + %conv72p = shl i64 %orp, 0 + %sextp = ashr i64 %conv72p, 0 + %conv73p = xor i64 %sextp, 0 + %and.ip = and i64 0, 0 + %xor2.ip = or i64 %and.ip, 0 + %sub.ip = or i64 %xor2.ip, 0 + %xor3.ip = xor i64 %sub.ip, %conv73p + %and4.ip = and i64 %xor3.ip, 0 + %cmp.ip = icmp slt i64 %and4.ip, 0 + %7 = trunc i64 %conv73p to i16 + %8 = or i16 0, %7 + %9 = select i1 %cmp.ip, i16 0, i16 %8 + %conv76i = and i16 %4, %5 + %conv76p = and i16 %conv76i, %9 + %10 = zext i32 0 to i64 + %add.ip1 = or i64 %10, 0 + %orp1 = or i64 %add.ip1, 0 + %conv72p1 = shl i64 %orp1, 0 + %sextp1 = ashr i64 %conv72p1, 0 + %conv73p1 = xor i64 %sextp1, 0 + %and.ip1 = and i64 0, 0 + %xor2.ip1 = or i64 %and.ip1, 0 + %sub.ip1 = or i64 %xor2.ip1, 0 + %xor3.ip1 = xor i64 %sub.ip1, %conv73p1 + %and4.ip1 = and i64 %xor3.ip1, 0 + %cmp.ip1 = icmp slt i64 %and4.ip1, 0 + %11 = trunc i64 %conv73p1 to i16 + %12 = or i16 0, %11 + %13 = select i1 %cmp.ip1, i16 0, i16 %12 + %conv76p2 = and i16 %conv76p, %13 + ret i16 %conv76p2 +} diff --git a/llvm/test/Transforms/SimplifyCFG/X86/hoist-loads-stores-with-cf.ll b/llvm/test/Transforms/SimplifyCFG/X86/hoist-loads-stores-with-cf.ll index 047ca71..0507e4e 100644 --- a/llvm/test/Transforms/SimplifyCFG/X86/hoist-loads-stores-with-cf.ll +++ b/llvm/test/Transforms/SimplifyCFG/X86/hoist-loads-stores-with-cf.ll @@ -72,10 +72,9 @@ define i32 @succ1to0_phi(ptr %p) { ; CHECK-NEXT: [[COND:%.*]] = icmp eq ptr [[P:%.*]], null ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[COND]], true ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i1 [[TMP0]] to <1 x i1> -; CHECK-NEXT: [[TMP2:%.*]] = call <1 x i32> @llvm.masked.load.v1i32.p0(ptr [[P]], i32 4, <1 x i1> [[TMP1]], <1 x i32> poison) +; CHECK-NEXT: [[TMP2:%.*]] = call <1 x i32> @llvm.masked.load.v1i32.p0(ptr [[P]], i32 4, <1 x i1> [[TMP1]], <1 x i32> zeroinitializer) ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i32> [[TMP2]] to i32 -; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[COND]], i32 0, i32 [[TMP3]] -; CHECK-NEXT: ret i32 [[SPEC_SELECT]] +; CHECK-NEXT: ret i32 [[TMP3]] ; entry: %cond = icmp eq ptr %p, null @@ -184,10 +183,9 @@ define i32 @load_from_gep(ptr %p) { ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 16 ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[COND]], true ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i1 [[TMP0]] to <1 x i1> -; CHECK-NEXT: [[TMP2:%.*]] = call <1 x i32> @llvm.masked.load.v1i32.p0(ptr [[ARRAYIDX]], i32 4, <1 x i1> [[TMP1]], <1 x i32> poison) +; CHECK-NEXT: [[TMP2:%.*]] = call <1 x i32> @llvm.masked.load.v1i32.p0(ptr [[ARRAYIDX]], i32 4, <1 x i1> [[TMP1]], <1 x i32> zeroinitializer) ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i32> [[TMP2]] to i32 -; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[COND]], i32 0, i32 [[TMP3]] -; CHECK-NEXT: ret i32 [[SPEC_SELECT]] +; CHECK-NEXT: ret i32 [[TMP3]] ; entry: %cond = icmp eq ptr %p, null @@ -674,6 +672,62 @@ if.false: ret void } +define i32 @str_transcode0(i1 %cond1, ptr %p, i1 %cond2) { +; CHECK-LABEL: @str_transcode0( +; CHECK-NEXT: entry: +; CHECK-NEXT: br i1 [[COND1:%.*]], label [[BB3:%.*]], label [[BB1:%.*]] +; CHECK: bb1: +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i1 [[COND2:%.*]] to <1 x i1> +; CHECK-NEXT: [[TMP1:%.*]] = call <1 x i64> @llvm.masked.load.v1i64.p0(ptr [[P:%.*]], i32 8, <1 x i1> [[TMP0]], <1 x i64> zeroinitializer) +; CHECK-NEXT: [[TMP2:%.*]] = bitcast <1 x i64> [[TMP1]] to i64 +; CHECK-NEXT: br label [[BB3]] +; CHECK: bb3: +; CHECK-NEXT: [[Y:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[TMP2]], [[BB1]] ] +; CHECK-NEXT: store i64 [[Y]], ptr [[P]], 
align 8 +; CHECK-NEXT: ret i32 0 +; +entry: + br i1 %cond1, label %bb3, label %bb1 + +bb1: ; preds = %entry + br i1 %cond2, label %bb2, label %bb3 + +bb2: ; preds = %bb1 + %x = load i64, ptr %p, align 8 + br label %bb3 + +bb3: ; preds = %bb2, %bb1, %entry + %y = phi i64 [ %x, %bb2 ], [ 0, %bb1 ], [ 0, %entry ] + store i64 %y, ptr %p, align 8 + ret i32 0 +} + +define i32 @succ1to0_phi2(ptr %p, ptr %p2) { +; CHECK-LABEL: @succ1to0_phi2( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[COND:%.*]] = icmp eq ptr [[P:%.*]], null +; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[COND]], true +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i1 [[TMP0]] to <1 x i1> +; CHECK-NEXT: [[TMP2:%.*]] = call <1 x i32> @llvm.masked.load.v1i32.p0(ptr [[P]], i32 4, <1 x i1> [[TMP1]], <1 x i32> zeroinitializer) +; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i32> [[TMP2]] to i32 +; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[TMP3]] to <1 x i32> +; CHECK-NEXT: call void @llvm.masked.store.v1i32.p0(<1 x i32> [[TMP4]], ptr [[P2:%.*]], i32 4, <1 x i1> [[TMP1]]) +; CHECK-NEXT: ret i32 [[TMP3]] +; +entry: + %cond = icmp eq ptr %p, null + br i1 %cond, label %if.true, label %if.false + +if.false: + %0 = load i32, ptr %p + store i32 %0, ptr %p2 + br label %if.true + +if.true: + %res = phi i32 [ %0, %if.false ], [ 0, %entry ] + ret i32 %res +} + declare i32 @read_memory_only() readonly nounwind willreturn speculatable !llvm.dbg.cu = !{!0} diff --git a/llvm/test/Transforms/VectorCombine/AArch64/shrink-types.ll b/llvm/test/Transforms/VectorCombine/AArch64/shrink-types.ll index 1a23f0a..4216b0e 100644 --- a/llvm/test/Transforms/VectorCombine/AArch64/shrink-types.ll +++ b/llvm/test/Transforms/VectorCombine/AArch64/shrink-types.ll @@ -100,4 +100,17 @@ vector.body: ret i32 %2 } +define <2 x i32> @pr108698(<2 x i64> %x, <2 x i32> %y) { +; CHECK-LABEL: @pr108698( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i64> [[X:%.*]], zeroinitializer +; CHECK-NEXT: [[EXT:%.*]] = zext <2 x i1> [[CMP]] to <2 x i32> +; CHECK-NEXT: [[LSHR:%.*]] = lshr <2 x i32> [[EXT]], [[Y:%.*]] +; CHECK-NEXT: ret <2 x i32> [[LSHR]] +; + %cmp = icmp eq <2 x i64> %x, zeroinitializer + %ext = zext <2 x i1> %cmp to <2 x i32> + %lshr = lshr <2 x i32> %ext, %y + ret <2 x i32> %lshr +} + declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>) diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/partially-overlapping-group-resources.s b/llvm/test/tools/llvm-mca/X86/Znver4/partially-overlapping-group-resources.s new file mode 100644 index 0000000..7705c90 --- /dev/null +++ b/llvm/test/tools/llvm-mca/X86/Znver4/partially-overlapping-group-resources.s @@ -0,0 +1,91 @@ +# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py +# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=znver4 -timeline -timeline-max-iterations=1 < %s | FileCheck %s + +# This is a test for issue #108157. +# +# llvm-mca was crashing when analyzing the following code snippet. This was due to a bug in the +# instruction issue logic (from class ResourceManager) which was affecting instructions declaring +# consumption of partially overlapping resources. +# This test makes sure that analysis is successful, and that no crash occurs. 
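# "Partially overlapping" here means the two scheduling classes consume
# resource groups that share only some of their units: in the pressure
# view below, vpconflictd draws on all of Zn4FP0-Zn4FP3 while kxnorw is
# issued only on a subset of the same FP pipes, so neither group contains
# the other; that is the shape the old issue logic mishandled.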
+ +.intel_syntax +vpconflictd zmm0, zmm3 +kxnorw k1, k1, k1 +vpxord zmm1, zmm1, zmm1 + +# CHECK: Iterations: 100 +# CHECK-NEXT: Instructions: 300 +# CHECK-NEXT: Total Cycles: 209 +# CHECK-NEXT: Total uOps: 600 + +# CHECK: Dispatch Width: 6 +# CHECK-NEXT: uOps Per Cycle: 2.87 +# CHECK-NEXT: IPC: 1.44 +# CHECK-NEXT: Block RThroughput: 1.5 + +# CHECK: Instruction Info: +# CHECK-NEXT: [1]: #uOps +# CHECK-NEXT: [2]: Latency +# CHECK-NEXT: [3]: RThroughput +# CHECK-NEXT: [4]: MayLoad +# CHECK-NEXT: [5]: MayStore +# CHECK-NEXT: [6]: HasSideEffects (U) + +# CHECK: [1] [2] [3] [4] [5] [6] Instructions: +# CHECK-NEXT: 4 6 1.50 vpconflictd zmm0, zmm3 +# CHECK-NEXT: 1 1 0.50 kxnorw k1, k1, k1 +# CHECK-NEXT: 1 0 0.17 vpxord zmm1, zmm1, zmm1 + +# CHECK: Resources: +# CHECK-NEXT: [0] - Zn4AGU0 +# CHECK-NEXT: [1] - Zn4AGU1 +# CHECK-NEXT: [2] - Zn4AGU2 +# CHECK-NEXT: [3] - Zn4ALU0 +# CHECK-NEXT: [4] - Zn4ALU1 +# CHECK-NEXT: [5] - Zn4ALU2 +# CHECK-NEXT: [6] - Zn4ALU3 +# CHECK-NEXT: [7] - Zn4BRU1 +# CHECK-NEXT: [8] - Zn4FP0 +# CHECK-NEXT: [9] - Zn4FP1 +# CHECK-NEXT: [10] - Zn4FP2 +# CHECK-NEXT: [11] - Zn4FP3 +# CHECK-NEXT: [12.0] - Zn4FP45 +# CHECK-NEXT: [12.1] - Zn4FP45 +# CHECK-NEXT: [13] - Zn4FPSt +# CHECK-NEXT: [14.0] - Zn4LSU +# CHECK-NEXT: [14.1] - Zn4LSU +# CHECK-NEXT: [14.2] - Zn4LSU +# CHECK-NEXT: [15.0] - Zn4Load +# CHECK-NEXT: [15.1] - Zn4Load +# CHECK-NEXT: [15.2] - Zn4Load +# CHECK-NEXT: [16.0] - Zn4Store +# CHECK-NEXT: [16.1] - Zn4Store + +# CHECK: Resource pressure per iteration: +# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] +# CHECK-NEXT: - - - - - - - - 1.76 1.76 1.73 1.75 - - - - - - - - - - - + +# CHECK: Resource pressure by instruction: +# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] Instructions: +# CHECK-NEXT: - - - - - - - - 1.76 1.76 0.74 1.74 - - - - - - - - - - - vpconflictd zmm0, zmm3 +# CHECK-NEXT: - - - - - - - - - - 0.99 0.01 - - - - - - - - - - - kxnorw k1, k1, k1 +# CHECK-NEXT: - - - - - - - - - - - - - - - - - - - - - - - vpxord zmm1, zmm1, zmm1 + +# CHECK: Timeline view: +# CHECK-NEXT: Index 012345678 + +# CHECK: [0,0] DeeeeeeER vpconflictd zmm0, zmm3 +# CHECK-NEXT: [0,1] D==eE---R kxnorw k1, k1, k1 +# CHECK-NEXT: [0,2] D-------R vpxord zmm1, zmm1, zmm1 + +# CHECK: Average Wait times (based on the timeline view): +# CHECK-NEXT: [0]: Executions +# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue +# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready +# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage + +# CHECK: [0] [1] [2] [3] +# CHECK-NEXT: 0. 1 1.0 1.0 0.0 vpconflictd zmm0, zmm3 +# CHECK-NEXT: 1. 1 3.0 3.0 3.0 kxnorw k1, k1, k1 +# CHECK-NEXT: 2. 
1 0.0 0.0 7.0 vpxord zmm1, zmm1, zmm1 +# CHECK-NEXT: 1 1.3 1.3 3.3 <total> diff --git a/llvm/tools/lli/lli.cpp b/llvm/tools/lli/lli.cpp index 25f43a4..f21d981 100644 --- a/llvm/tools/lli/lli.cpp +++ b/llvm/tools/lli/lli.cpp @@ -66,7 +66,6 @@ #include "llvm/Support/WithColor.h" #include "llvm/Support/raw_ostream.h" #include "llvm/TargetParser/Triple.h" -#include "llvm/Transforms/Instrumentation.h" #include <cerrno> #include <optional> diff --git a/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp b/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp index e66584b..e404dd0 100644 --- a/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp +++ b/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp @@ -273,6 +273,8 @@ TEST_F(SelectionDAGPatternMatchTest, matchUnaryOp) { SDValue FPToSI = DAG->getNode(ISD::FP_TO_SINT, DL, FloatVT, Op2); SDValue FPToUI = DAG->getNode(ISD::FP_TO_UINT, DL, FloatVT, Op2); + SDValue Ctlz = DAG->getNode(ISD::CTLZ, DL, Int32VT, Op0); + using namespace SDPatternMatch; EXPECT_TRUE(sd_match(ZExt, m_UnaryOp(ISD::ZERO_EXTEND, m_Value()))); EXPECT_TRUE(sd_match(SExt, m_SExt(m_Value()))); @@ -296,6 +298,8 @@ TEST_F(SelectionDAGPatternMatchTest, matchUnaryOp) { EXPECT_TRUE(sd_match(FPToSI, m_FPToSI(m_Value()))); EXPECT_FALSE(sd_match(FPToUI, m_FPToSI(m_Value()))); EXPECT_FALSE(sd_match(FPToSI, m_FPToUI(m_Value()))); + + EXPECT_TRUE(sd_match(Ctlz, m_Ctlz(m_Value()))); } TEST_F(SelectionDAGPatternMatchTest, matchConstants) { diff --git a/llvm/unittests/Support/DynamicLibrary/CMakeLists.txt b/llvm/unittests/Support/DynamicLibrary/CMakeLists.txt index d8dff1e..2366209 100644 --- a/llvm/unittests/Support/DynamicLibrary/CMakeLists.txt +++ b/llvm/unittests/Support/DynamicLibrary/CMakeLists.txt @@ -15,12 +15,22 @@ set_output_directory(DynamicLibraryLib LIBRARY_DIR ${LLVM_LIBRARY_OUTPUT_INTDIR} ) -add_llvm_unittest(DynamicLibraryTests - DynamicLibraryTest.cpp +# FIXME: Find out why AIX fails with new DynamicLibrary symbols behavior. +if(${CMAKE_SYSTEM_NAME} MATCHES "AIX") + add_llvm_unittest(DynamicLibraryTests + DynamicLibraryTest.cpp + ) +else() + add_llvm_unittest(DynamicLibraryTests + DynamicLibraryTest.cpp - EXPORT_SYMBOLS - ) + EXPORT_SYMBOLS + ) +endif() target_link_libraries(DynamicLibraryTests PRIVATE DynamicLibraryLib) +if(${CMAKE_SYSTEM_NAME} MATCHES "AIX") + export_executable_symbols(DynamicLibraryTests) +endif() function(dynlib_add_module NAME) add_library(${NAME} MODULE diff --git a/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/llvm/utils/TableGen/AsmMatcherEmitter.cpp index 654b21d..2a94b77 100644 --- a/llvm/utils/TableGen/AsmMatcherEmitter.cpp +++ b/llvm/utils/TableGen/AsmMatcherEmitter.cpp @@ -454,7 +454,7 @@ struct MatchableInfo { int64_t ImmVal; /// Register - This is the register record. 
- Record *Register; + const Record *Register; }; /// MINumOperands - The number of MCInst operands populated by this @@ -486,7 +486,7 @@ struct MatchableInfo { return X; } - static ResOperand getRegOp(Record *Reg) { + static ResOperand getRegOp(const Record *Reg) { ResOperand X; X.Kind = RegOperand; X.Register = Reg; @@ -1946,7 +1946,7 @@ void MatchableInfo::buildAliasResultOperands(bool AliasConstraintsAreChecked) { break; } case CodeGenInstAlias::ResultOperand::K_Reg: { - Record *Reg = CGA.ResultOperands[AliasOpNo].getRegister(); + const Record *Reg = CGA.ResultOperands[AliasOpNo].getRegister(); ResOperands.push_back(ResOperand::getRegOp(Reg)); break; } diff --git a/llvm/utils/TableGen/AsmWriterEmitter.cpp b/llvm/utils/TableGen/AsmWriterEmitter.cpp index 79a1c24..cbf3a38 100644 --- a/llvm/utils/TableGen/AsmWriterEmitter.cpp +++ b/llvm/utils/TableGen/AsmWriterEmitter.cpp @@ -636,7 +636,7 @@ void AsmWriterEmitter::EmitGetRegisterName(raw_ostream &O) { Record *AsmWriter = Target.getAsmWriter(); StringRef ClassName = AsmWriter->getValueAsString("AsmWriterClassName"); const auto &Registers = Target.getRegBank().getRegisters(); - const std::vector<Record *> &AltNameIndices = Target.getRegAltNameIndices(); + ArrayRef<const Record *> AltNameIndices = Target.getRegAltNameIndices(); bool hasAltNames = AltNameIndices.size() > 1; StringRef Namespace = Registers.front().TheDef->getValueAsString("Namespace"); @@ -953,7 +953,7 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) { if (Rec->isSubClassOf("RegisterClass")) { if (!IAP.isOpMapped(ROName)) { IAP.addOperand(ROName, MIOpNum, PrintMethodIdx); - Record *R = CGA.ResultOperands[i].getRecord(); + const Record *R = CGA.ResultOperands[i].getRecord(); if (R->isSubClassOf("RegisterOperand")) R = R->getValueAsDef("RegClass"); IAP.addCond(std::string( diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp index a77e247..fd80bc6 100644 --- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp +++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp @@ -1529,7 +1529,7 @@ std::string PatternToMatch::getPredicateCheck() const { // SDTypeConstraint implementation // -SDTypeConstraint::SDTypeConstraint(Record *R, const CodeGenHwModes &CGH) { +SDTypeConstraint::SDTypeConstraint(const Record *R, const CodeGenHwModes &CGH) { OperandNo = R->getValueAsInt("OperandNum"); if (R->isSubClassOf("SDTCisVT")) { @@ -1799,7 +1799,7 @@ bool TreePatternNode::setDefaultMode(unsigned Mode) { //===----------------------------------------------------------------------===// // SDNodeInfo implementation // -SDNodeInfo::SDNodeInfo(Record *R, const CodeGenHwModes &CGH) : Def(R) { +SDNodeInfo::SDNodeInfo(const Record *R, const CodeGenHwModes &CGH) : Def(R) { EnumName = R->getValueAsString("Opcode"); SDClassName = R->getValueAsString("SDClass"); Record *TypeProfile = R->getValueAsDef("TypeProfile"); @@ -2296,7 +2296,7 @@ static TypeSetByHwMode getImplicitType(Record *R, unsigned ResNo, assert(ResNo == 0 && "FIXME: ComplexPattern with multiple results?"); if (NotRegisters) return TypeSetByHwMode(); // Unknown. 
- Record *T = CDP.getComplexPattern(R).getValueType(); + const Record *T = CDP.getComplexPattern(R).getValueType(); const CodeGenHwModes &CGH = CDP.getTargetInfo().getHwModes(); return TypeSetByHwMode(getValueTypeByHwMode(T, CGH)); } @@ -2700,7 +2700,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { if (!NotRegisters) { assert(Types.size() == 1 && "ComplexPatterns only produce one result!"); - Record *T = CDP.getComplexPattern(getOperator()).getValueType(); + const Record *T = CDP.getComplexPattern(getOperator()).getValueType(); const CodeGenHwModes &CGH = CDP.getTargetInfo().getHwModes(); const ValueTypeByHwMode VVT = getValueTypeByHwMode(T, CGH); // TODO: AArch64 and AMDGPU use ComplexPattern<untyped, ...> and then @@ -3157,7 +3157,7 @@ void TreePattern::dump() const { print(errs()); } // CodeGenDAGPatterns implementation // -CodeGenDAGPatterns::CodeGenDAGPatterns(RecordKeeper &R, +CodeGenDAGPatterns::CodeGenDAGPatterns(const RecordKeeper &R, PatternRewriterFn PatternRewriter) : Records(R), Target(R), Intrinsics(R), LegalVTS(Target.getLegalValueTypes()), PatternRewriter(PatternRewriter) { @@ -3198,14 +3198,10 @@ Record *CodeGenDAGPatterns::getSDNodeNamed(StringRef Name) const { // Parse all of the SDNode definitions for the target, populating SDNodes. void CodeGenDAGPatterns::ParseNodeInfo() { - std::vector<Record *> Nodes = Records.getAllDerivedDefinitions("SDNode"); const CodeGenHwModes &CGH = getTargetInfo().getHwModes(); - while (!Nodes.empty()) { - Record *R = Nodes.back(); + for (const Record *R : reverse(Records.getAllDerivedDefinitions("SDNode"))) SDNodes.insert(std::pair(R, SDNodeInfo(R, CGH))); - Nodes.pop_back(); - } // Get the builtin intrinsic nodes. intrinsic_void_sdnode = getSDNodeNamed("intrinsic_void"); @@ -3216,26 +3212,18 @@ void CodeGenDAGPatterns::ParseNodeInfo() { /// ParseNodeTransforms - Parse all SDNodeXForm instances into the SDNodeXForms /// map, and emit them to the file as functions. void CodeGenDAGPatterns::ParseNodeTransforms() { - std::vector<Record *> Xforms = - Records.getAllDerivedDefinitions("SDNodeXForm"); - while (!Xforms.empty()) { - Record *XFormNode = Xforms.back(); - Record *SDNode = XFormNode->getValueAsDef("Opcode"); + for (const Record *XFormNode : + reverse(Records.getAllDerivedDefinitions("SDNodeXForm"))) { + const Record *SDNode = XFormNode->getValueAsDef("Opcode"); StringRef Code = XFormNode->getValueAsString("XFormFunction"); - SDNodeXForms.insert( - std::pair(XFormNode, NodeXForm(SDNode, std::string(Code)))); - - Xforms.pop_back(); + SDNodeXForms.insert({XFormNode, NodeXForm(SDNode, std::string(Code))}); } } void CodeGenDAGPatterns::ParseComplexPatterns() { - std::vector<Record *> AMs = - Records.getAllDerivedDefinitions("ComplexPattern"); - while (!AMs.empty()) { - ComplexPatterns.insert(std::pair(AMs.back(), AMs.back())); - AMs.pop_back(); - } + for (const Record *R : + reverse(Records.getAllDerivedDefinitions("ComplexPattern"))) + ComplexPatterns.insert({R, R}); } /// ParsePatternFragments - Parse all of the PatFrag definitions in the .td @@ -3244,11 +3232,10 @@ void CodeGenDAGPatterns::ParseComplexPatterns() { /// inside a pattern fragment to a pattern fragment. /// void CodeGenDAGPatterns::ParsePatternFragments(bool OutFrags) { - std::vector<Record *> Fragments = - Records.getAllDerivedDefinitions("PatFrags"); - // First step, parse all of the fragments. 
- for (Record *Frag : Fragments) { + ArrayRef<const Record *> Fragments = + Records.getAllDerivedDefinitions("PatFrags"); + for (const Record *Frag : Fragments) { if (OutFrags != Frag->isSubClassOf("OutPatFrag")) continue; @@ -3307,7 +3294,7 @@ void CodeGenDAGPatterns::ParsePatternFragments(bool OutFrags) { // Now that we've parsed all of the tree fragments, do a closure on them so // that there are not references to PatFrags left inside of them. - for (Record *Frag : Fragments) { + for (const Record *Frag : Fragments) { if (OutFrags != Frag->isSubClassOf("OutPatFrag")) continue; @@ -3331,8 +3318,8 @@ void CodeGenDAGPatterns::ParsePatternFragments(bool OutFrags) { } void CodeGenDAGPatterns::ParseDefaultOperands() { - std::vector<Record *> DefaultOps; - DefaultOps = Records.getAllDerivedDefinitions("OperandWithDefaultOps"); + ArrayRef<const Record *> DefaultOps = + Records.getAllDerivedDefinitions("OperandWithDefaultOps"); // Find some SDNode. assert(!SDNodes.empty() && "No SDNodes parsed?"); @@ -3947,10 +3934,7 @@ void CodeGenDAGPatterns::parseInstructionPattern(CodeGenInstruction &CGI, /// any fragments involved. This populates the Instructions list with fully /// resolved instructions. void CodeGenDAGPatterns::ParseInstructions() { - std::vector<Record *> Instrs = - Records.getAllDerivedDefinitions("Instruction"); - - for (Record *Instr : Instrs) { + for (const Record *Instr : Records.getAllDerivedDefinitions("Instruction")) { ListInit *LI = nullptr; if (isa<ListInit>(Instr->getValueInit("Pattern"))) @@ -4346,9 +4330,7 @@ void CodeGenDAGPatterns::ParseOnePattern( } void CodeGenDAGPatterns::ParsePatterns() { - std::vector<Record *> Patterns = Records.getAllDerivedDefinitions("Pattern"); - - for (Record *CurPattern : Patterns) { + for (const Record *CurPattern : Records.getAllDerivedDefinitions("Pattern")) { DagInit *Tree = CurPattern->getValueAsDag("PatternToMatch"); // If the pattern references the null_frag, there's nothing to do. 
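// (On the loop rewrites above: popping records off the back of a vector
// visits it back-to-front, which is exactly the order llvm::reverse()
// yields, so the SDNodes/SDNodeXForms/ComplexPatterns maps are populated
// in the same order as before. Sketch, for V = {A, B, C} and a
// hypothetical use() helper:
//   while (!V.empty()) { use(V.back()); V.pop_back(); }  // C, B, A
//   for (const Record *R : reverse(V)) use(R);           // C, B, A
// )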
@@ -4407,10 +4389,10 @@ void CodeGenDAGPatterns::ExpandHwModeBasedTypes() { return; } - PatternsToMatch.emplace_back( - P.getSrcRecord(), P.getPredicates(), std::move(NewSrc), - std::move(NewDst), P.getDstRegs(), P.getAddedComplexity(), - Record::getNewUID(Records), P.getGISelShouldIgnore(), Check); + PatternsToMatch.emplace_back(P.getSrcRecord(), P.getPredicates(), + std::move(NewSrc), std::move(NewDst), + P.getDstRegs(), P.getAddedComplexity(), + getNewUID(), P.getGISelShouldIgnore(), Check); }; for (PatternToMatch &P : Copy) { @@ -4780,7 +4762,7 @@ void CodeGenDAGPatterns::GenerateVariants() { PatternsToMatch[i].getSrcRecord(), PatternsToMatch[i].getPredicates(), Variant, PatternsToMatch[i].getDstPatternShared(), PatternsToMatch[i].getDstRegs(), - PatternsToMatch[i].getAddedComplexity(), Record::getNewUID(Records), + PatternsToMatch[i].getAddedComplexity(), getNewUID(), PatternsToMatch[i].getGISelShouldIgnore(), PatternsToMatch[i].getHwModeFeatures()); } @@ -4788,3 +4770,8 @@ void CodeGenDAGPatterns::GenerateVariants() { LLVM_DEBUG(errs() << "\n"); } } + +unsigned CodeGenDAGPatterns::getNewUID() { + RecordKeeper &MutableRC = const_cast<RecordKeeper &>(Records); + return Record::getNewUID(MutableRC); +} diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h index 4dc08e6..0aa6287 100644 --- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h +++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h @@ -354,7 +354,7 @@ typedef StringSet<> MultipleUseVarSet; /// SDTypeConstraint - This is a discriminated union of constraints, /// corresponding to the SDTypeConstraint tablegen class in Target.td. struct SDTypeConstraint { - SDTypeConstraint(Record *R, const CodeGenHwModes &CGH); + SDTypeConstraint(const Record *R, const CodeGenHwModes &CGH); unsigned OperandNo; // The operand # this constraint applies to. enum { @@ -435,7 +435,7 @@ public: /// the target .td file. This represents the various dag nodes we will be /// processing. class SDNodeInfo { - Record *Def; + const Record *Def; StringRef EnumName; StringRef SDClassName; unsigned Properties; @@ -445,14 +445,14 @@ class SDNodeInfo { public: // Parse the specified record. - SDNodeInfo(Record *R, const CodeGenHwModes &CGH); + SDNodeInfo(const Record *R, const CodeGenHwModes &CGH); unsigned getNumResults() const { return NumResults; } /// getNumOperands - This is the number of operands required or -1 if /// variadic. 
int getNumOperands() const { return NumOperands; } - Record *getRecord() const { return Def; } + const Record *getRecord() const { return Def; } StringRef getEnumName() const { return EnumName; } StringRef getSDClassName() const { return SDClassName; } @@ -1095,13 +1095,17 @@ public: }; class CodeGenDAGPatterns { - RecordKeeper &Records; +public: + using NodeXForm = std::pair<const Record *, std::string>; + +private: + const RecordKeeper &Records; CodeGenTarget Target; CodeGenIntrinsicTable Intrinsics; std::map<const Record *, SDNodeInfo, LessRecordByID> SDNodes; - std::map<const Record *, std::pair<Record *, std::string>, LessRecordByID> - SDNodeXForms; + + std::map<const Record *, NodeXForm, LessRecordByID> SDNodeXForms; std::map<const Record *, ComplexPattern, LessRecordByID> ComplexPatterns; std::map<const Record *, std::unique_ptr<TreePattern>, LessRecordByID> PatternFragments; @@ -1125,7 +1129,7 @@ class CodeGenDAGPatterns { unsigned NumScopes = 0; public: - CodeGenDAGPatterns(RecordKeeper &R, + CodeGenDAGPatterns(const RecordKeeper &R, PatternRewriterFn PatternRewriter = nullptr); CodeGenTarget &getTargetInfo() { return Target; } @@ -1141,7 +1145,6 @@ public: } // Node transformation lookups. - typedef std::pair<Record *, std::string> NodeXForm; const NodeXForm &getSDNodeTransform(const Record *R) const { auto F = SDNodeXForms.find(R); assert(F != SDNodeXForms.end() && "Invalid transform!"); @@ -1254,6 +1257,7 @@ private: MapVector<std::string, TreePatternNodePtr, std::map<std::string, unsigned>> &InstResults, std::vector<Record *> &InstImpResults); + unsigned getNewUID(); }; inline bool SDNodeInfo::ApplyTypeConstraints(TreePatternNode &N, diff --git a/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp b/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp index 653fc23..f23ccf9 100644 --- a/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp +++ b/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp @@ -24,9 +24,11 @@ using namespace llvm; /// constructor. It checks if an argument in an InstAlias pattern matches /// the corresponding operand of the instruction. It returns true on a /// successful match, with ResOp set to the result operand to be used. 
-bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo, +bool CodeGenInstAlias::tryAliasOpMatch(const DagInit *Result, + unsigned AliasOpNo, const Record *InstOpRec, bool hasSubOps, - ArrayRef<SMLoc> Loc, CodeGenTarget &T, + ArrayRef<SMLoc> Loc, + const CodeGenTarget &T, ResultOperand &ResOp) { Init *Arg = Result->getArg(AliasOpNo); DefInit *ADI = dyn_cast<DefInit>(Arg); @@ -152,11 +154,11 @@ unsigned CodeGenInstAlias::ResultOperand::getMINumOperands() const { if (!isRecord()) return 1; - Record *Rec = getRecord(); + const Record *Rec = getRecord(); if (!Rec->isSubClassOf("Operand")) return 1; - DagInit *MIOpInfo = Rec->getValueAsDag("MIOperandInfo"); + const DagInit *MIOpInfo = Rec->getValueAsDag("MIOperandInfo"); if (MIOpInfo->getNumArgs() == 0) { // Unspecified, so it defaults to 1 return 1; @@ -165,7 +167,8 @@ unsigned CodeGenInstAlias::ResultOperand::getMINumOperands() const { return MIOpInfo->getNumArgs(); } -CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) { +CodeGenInstAlias::CodeGenInstAlias(const Record *R, const CodeGenTarget &T) + : TheDef(R) { Result = R->getValueAsDag("ResultInst"); AsmString = std::string(R->getValueAsString("AsmString")); diff --git a/llvm/utils/TableGen/Common/CodeGenInstAlias.h b/llvm/utils/TableGen/Common/CodeGenInstAlias.h index 646f1f1..dd6f93e 100644 --- a/llvm/utils/TableGen/Common/CodeGenInstAlias.h +++ b/llvm/utils/TableGen/Common/CodeGenInstAlias.h @@ -32,7 +32,7 @@ class Record; /// CodeGenInstAlias - This represents an InstAlias definition. class CodeGenInstAlias { public: - Record *TheDef; // The actual record defining this InstAlias. + const Record *TheDef; // The actual record defining this InstAlias. /// AsmString - The format string used to emit a .s file for the /// instruction. @@ -48,16 +48,16 @@ public: struct ResultOperand { private: std::string Name; - Record *R = nullptr; + const Record *R = nullptr; int64_t Imm = 0; public: enum { K_Record, K_Imm, K_Reg } Kind; - ResultOperand(std::string N, Record *r) - : Name(std::move(N)), R(r), Kind(K_Record) {} + ResultOperand(std::string N, const Record *R) + : Name(std::move(N)), R(R), Kind(K_Record) {} ResultOperand(int64_t I) : Imm(I), Kind(K_Imm) {} - ResultOperand(Record *r) : R(r), Kind(K_Reg) {} + ResultOperand(Record *R) : R(R), Kind(K_Reg) {} bool isRecord() const { return Kind == K_Record; } bool isImm() const { return Kind == K_Imm; } @@ -67,7 +67,7 @@ public: assert(isRecord()); return Name; } - Record *getRecord() const { + const Record *getRecord() const { assert(isRecord()); return R; } @@ -75,7 +75,7 @@ public: assert(isImm()); return Imm; } - Record *getRegister() const { + const Record *getRegister() const { assert(isReg()); return R; } @@ -93,11 +93,11 @@ public: /// of them are matched by the operand, the second value should be -1. 
std::vector<std::pair<unsigned, int>> ResultInstOperandIndex; - CodeGenInstAlias(Record *R, CodeGenTarget &T); + CodeGenInstAlias(const Record *R, const CodeGenTarget &T); - bool tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo, + bool tryAliasOpMatch(const DagInit *Result, unsigned AliasOpNo, const Record *InstOpRec, bool hasSubOps, - ArrayRef<SMLoc> Loc, CodeGenTarget &T, + ArrayRef<SMLoc> Loc, const CodeGenTarget &T, ResultOperand &ResOp); }; diff --git a/llvm/utils/TableGen/Common/CodeGenSchedule.cpp b/llvm/utils/TableGen/Common/CodeGenSchedule.cpp index 3dcfdc9..33d1da2 100644 --- a/llvm/utils/TableGen/Common/CodeGenSchedule.cpp +++ b/llvm/utils/TableGen/Common/CodeGenSchedule.cpp @@ -182,7 +182,7 @@ struct InstRegexOp : public SetTheory::Operator { } // end anonymous namespace /// CodeGenModels ctor interprets machine model records and populates maps. -CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK, +CodeGenSchedModels::CodeGenSchedModels(const RecordKeeper &RK, const CodeGenTarget &TGT) : Records(RK), Target(TGT) { @@ -256,8 +256,7 @@ void CodeGenSchedModels::checkSTIPredicates() const { DenseMap<StringRef, const Record *> Declarations; // There cannot be multiple declarations with the same name. - const RecVec Decls = Records.getAllDerivedDefinitions("STIPredicateDecl"); - for (const Record *R : Decls) { + for (const Record *R : Records.getAllDerivedDefinitions("STIPredicateDecl")) { StringRef Name = R->getValueAsString("Name"); const auto It = Declarations.find(Name); if (It == Declarations.end()) { @@ -270,9 +269,8 @@ void CodeGenSchedModels::checkSTIPredicates() const { } // Disallow InstructionEquivalenceClasses with an empty instruction list. - const RecVec Defs = - Records.getAllDerivedDefinitions("InstructionEquivalenceClass"); - for (const Record *R : Defs) { + for (const Record *R : + Records.getAllDerivedDefinitions("InstructionEquivalenceClass")) { RecVec Opcodes = R->getValueAsListOfDefs("Opcodes"); if (Opcodes.empty()) { PrintFatalError(R->getLoc(), "Invalid InstructionEquivalenceClass " @@ -417,9 +415,7 @@ void CodeGenSchedModels::collectSTIPredicates() { // Map STIPredicateDecl records to elements of vector // CodeGenSchedModels::STIPredicates. DenseMap<const Record *, unsigned> Decl2Index; - - RecVec RV = Records.getAllDerivedDefinitions("STIPredicate"); - for (const Record *R : RV) { + for (const Record *R : Records.getAllDerivedDefinitions("STIPredicate")) { const Record *Decl = R->getValueAsDef("Declaration"); const auto It = Decl2Index.find(Decl); @@ -454,13 +450,10 @@ void OpcodeInfo::addPredicateForProcModel(const llvm::APInt &CpuMask, } void CodeGenSchedModels::checkMCInstPredicates() const { - RecVec MCPredicates = Records.getAllDerivedDefinitions("TIIPredicate"); - if (MCPredicates.empty()) - return; - // A target cannot have multiple TIIPredicate definitions with a same name. 
- llvm::StringMap<const Record *> TIIPredicates(MCPredicates.size()); - for (const Record *TIIPred : MCPredicates) { + llvm::StringMap<const Record *> TIIPredicates; + for (const Record *TIIPred : + Records.getAllDerivedDefinitions("TIIPredicate")) { StringRef Name = TIIPred->getValueAsString("FunctionName"); StringMap<const Record *>::const_iterator It = TIIPredicates.find(Name); if (It == TIIPredicates.end()) { @@ -476,9 +469,8 @@ void CodeGenSchedModels::checkMCInstPredicates() const { } void CodeGenSchedModels::collectRetireControlUnits() { - RecVec Units = Records.getAllDerivedDefinitions("RetireControlUnit"); - - for (Record *RCU : Units) { + for (const Record *RCU : + Records.getAllDerivedDefinitions("RetireControlUnit")) { CodeGenProcModel &PM = getProcModel(RCU->getValueAsDef("SchedModel")); if (PM.RetireControlUnit) { PrintError(RCU->getLoc(), @@ -491,9 +483,7 @@ void CodeGenSchedModels::collectRetireControlUnits() { } void CodeGenSchedModels::collectLoadStoreQueueInfo() { - RecVec Queues = Records.getAllDerivedDefinitions("MemoryQueue"); - - for (Record *Queue : Queues) { + for (const Record *Queue : Records.getAllDerivedDefinitions("MemoryQueue")) { CodeGenProcModel &PM = getProcModel(Queue->getValueAsDef("SchedModel")); if (Queue->isSubClassOf("LoadQueue")) { if (PM.LoadQueue) { @@ -533,7 +523,8 @@ void CodeGenSchedModels::collectOptionalProcessorInfo() { /// Gather all processor models. void CodeGenSchedModels::collectProcModels() { - RecVec ProcRecords = Records.getAllDerivedDefinitions("Processor"); + std::vector<const Record *> ProcRecords = + Records.getAllDerivedDefinitions("Processor"); // Sort and check duplicate Processor name. sortAndReportDuplicates(ProcRecords, "Processor"); @@ -549,14 +540,14 @@ void CodeGenSchedModels::collectProcModels() { // For each processor, find a unique machine model. LLVM_DEBUG(dbgs() << "+++ PROCESSOR MODELs (addProcModel) +++\n"); - for (Record *ProcRecord : ProcRecords) + for (const Record *ProcRecord : ProcRecords) addProcModel(ProcRecord); } /// Get a unique processor model based on the defined MachineModel and /// ProcessorItineraries. -void CodeGenSchedModels::addProcModel(Record *ProcDef) { - Record *ModelKey = getModelOrItinDef(ProcDef); +void CodeGenSchedModels::addProcModel(const Record *ProcDef) { + const Record *ModelKey = getModelOrItinDef(ProcDef); if (!ProcModelMap.insert(std::pair(ModelKey, ProcModels.size())).second) return; @@ -575,20 +566,18 @@ void CodeGenSchedModels::addProcModel(Record *ProcDef) { } // Recursively find all reachable SchedReadWrite records. -static void scanSchedRW(Record *RWDef, RecVec &RWDefs, - SmallPtrSet<Record *, 16> &RWSet) { +static void scanSchedRW(const Record *RWDef, ConstRecVec &RWDefs, + SmallPtrSet<const Record *, 16> &RWSet) { if (!RWSet.insert(RWDef).second) return; RWDefs.push_back(RWDef); // Reads don't currently have sequence records, but it can be added later. if (RWDef->isSubClassOf("WriteSequence")) { - RecVec Seq = RWDef->getValueAsListOfDefs("Writes"); - for (Record *WSRec : Seq) + for (const Record *WSRec : RWDef->getValueAsListOfDefs("Writes")) scanSchedRW(WSRec, RWDefs, RWSet); } else if (RWDef->isSubClassOf("SchedVariant")) { // Visit each variant (guarded by a different predicate). - RecVec Vars = RWDef->getValueAsListOfDefs("Variants"); - for (Record *Variant : Vars) { + for (const Record *Variant : RWDef->getValueAsListOfDefs("Variants")) { // Visit each RW in the sequence selected by the current variant. 
RecVec Selected = Variant->getValueAsListOfDefs("Selected"); for (Record *SelDef : Selected) @@ -604,10 +593,10 @@ void CodeGenSchedModels::collectSchedRW() { SchedWrites.resize(1); SchedReads.resize(1); - SmallPtrSet<Record *, 16> RWSet; + SmallPtrSet<const Record *, 16> RWSet; // Find all SchedReadWrites referenced by instruction defs. - RecVec SWDefs, SRDefs; + ConstRecVec SWDefs, SRDefs; for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) { const Record *SchedDef = Inst->TheDef; if (SchedDef->isValueUnset("SchedRW")) @@ -623,8 +612,7 @@ } } // Find all ReadWrites referenced by InstRW. - RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW"); - for (Record *InstRWDef : InstRWDefs) { + for (const Record *InstRWDef : Records.getAllDerivedDefinitions("InstRW")) { // For all OperandReadWrites. RecVec RWDefs = InstRWDef->getValueAsListOfDefs("OperandReadWrites"); for (Record *RWDef : RWDefs) { @@ -637,8 +625,7 @@ } } // Find all ReadWrites referenced by ItinRW. - RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW"); - for (Record *ItinRWDef : ItinRWDefs) { + for (const Record *ItinRWDef : Records.getAllDerivedDefinitions("ItinRW")) { // For all OperandReadWrites. RecVec RWDefs = ItinRWDef->getValueAsListOfDefs("OperandReadWrites"); for (Record *RWDef : RWDefs) { @@ -651,12 +638,13 @@ } } // Find all ReadWrites referenced by SchedAlias. AliasDefs needs to be sorted - // for the loop below that initializes Alias vectors. - RecVec AliasDefs = Records.getAllDerivedDefinitions("SchedAlias"); - llvm::sort(AliasDefs, LessRecord()); - for (Record *ADef : AliasDefs) { - Record *MatchDef = ADef->getValueAsDef("MatchRW"); - Record *AliasDef = ADef->getValueAsDef("AliasRW"); + // for the loop below that initializes Alias vectors (which it already + // is by RecordKeeper::getAllDerivedDefinitions). + ArrayRef<const Record *> AliasDefs = + Records.getAllDerivedDefinitions("SchedAlias"); + for (const Record *ADef : AliasDefs) { + const Record *MatchDef = ADef->getValueAsDef("MatchRW"); + const Record *AliasDef = ADef->getValueAsDef("AliasRW"); if (MatchDef->isSubClassOf("SchedWrite")) { if (!AliasDef->isSubClassOf("SchedWrite")) PrintFatalError(ADef->getLoc(), "SchedWrite Alias must be SchedWrite"); @@ -671,12 +659,12 @@ // Sort and add the SchedReadWrites directly referenced by instructions or // itinerary resources. Index reads and writes in separate domains. llvm::sort(SWDefs, LessRecord()); - for (Record *SWDef : SWDefs) { + for (const Record *SWDef : SWDefs) { assert(!getSchedRWIdx(SWDef, /*IsRead=*/false) && "duplicate SchedWrite"); SchedWrites.emplace_back(SchedWrites.size(), SWDef); } llvm::sort(SRDefs, LessRecord()); - for (Record *SRDef : SRDefs) { + for (const Record *SRDef : SRDefs) { assert(!getSchedRWIdx(SRDef, /*IsRead=*/true) && "duplicate SchedRead"); SchedReads.emplace_back(SchedReads.size(), SRDef); } @@ -688,7 +676,7 @@ /*IsRead=*/false); } // Initialize Aliases vectors.
- for (Record *ADef : AliasDefs) { + for (const Record *ADef : AliasDefs) { Record *AliasDef = ADef->getValueAsDef("AliasRW"); getSchedRW(AliasDef).IsAlias = true; Record *MatchDef = ADef->getValueAsDef("MatchRW"); @@ -708,9 +696,8 @@ void CodeGenSchedModels::collectSchedRW() { dbgs() << RIdx << ": "; SchedReads[RIdx].dump(); dbgs() << '\n'; - } RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite"); - for (Record *RWDef - : RWDefs) { + } for (const Record *RWDef + : Records.getAllDerivedDefinitions("SchedReadWrite")) { if (!getSchedRWIdx(RWDef, RWDef->isSubClassOf("SchedRead"))) { StringRef Name = RWDef->getName(); if (Name != "NoWrite" && Name != "ReadDefault") @@ -791,9 +778,8 @@ void CodeGenSchedModels::expandRWSequence(unsigned RWIdx, IdxVec &RWSeq, void CodeGenSchedModels::expandRWSeqForProc( unsigned RWIdx, IdxVec &RWSeq, bool IsRead, const CodeGenProcModel &ProcModel) const { - const CodeGenSchedRW &SchedWrite = getSchedRW(RWIdx, IsRead); - Record *AliasDef = nullptr; + const Record *AliasDef = nullptr; for (const Record *Rec : SchedWrite.Aliases) { const CodeGenSchedRW &AliasRW = getSchedRW(Rec->getValueAsDef("AliasRW")); if (Rec->getValueInit("SchedModel")->isComplete()) { @@ -880,10 +866,8 @@ void CodeGenSchedModels::collectSchedClasses() { InstrClassMap[Inst->TheDef] = SCIdx; } // Create classes for InstRW defs. - RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW"); - llvm::sort(InstRWDefs, LessRecord()); LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (createInstRWClass) +++\n"); - for (Record *RWDef : InstRWDefs) + for (const Record *RWDef : Records.getAllDerivedDefinitions("InstRW")) createInstRWClass(RWDef); NumInstrSchedClasses = SchedClasses.size(); @@ -929,8 +913,7 @@ void CodeGenSchedModels::collectSchedClasses() { dbgs() << '\n'; }); } - const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs; - for (Record *RWDef : RWDefs) { + for (const Record *RWDef : SchedClasses[SCIdx].InstRWs) { const CodeGenProcModel &ProcModel = getProcModel(RWDef->getValueAsDef("SchedModel")); ProcIndices.push_back(ProcModel.Index); @@ -1034,7 +1017,7 @@ unsigned CodeGenSchedModels::addSchedClass(Record *ItinClassDef, // Create classes for each set of opcodes that are in the same InstReadWrite // definition across all processors. -void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) { +void CodeGenSchedModels::createInstRWClass(const Record *InstRWDef) { // ClassInstrs will hold an entry for each subset of Instrs in InstRWDef that // intersects with an existing class via a previous InstRWDef. Instrs that do // not intersect with an existing class refer back to their former class as @@ -1060,7 +1043,7 @@ void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) { // If all the instrs in the current class are accounted for, then leave // them mapped to their old class. if (OldSCIdx) { - const RecVec &RWDefs = SchedClasses[OldSCIdx].InstRWs; + const ConstRecVec &RWDefs = SchedClasses[OldSCIdx].InstRWs; if (!RWDefs.empty()) { const ConstRecVec *OrigInstDefs = Sets.expand(RWDefs[0]); unsigned OrigNumInstrs = @@ -1073,7 +1056,7 @@ void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) { Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel"); // Make sure we didn't already have an InstRW containing this // instruction on this model. - for (Record *RWD : RWDefs) { + for (const Record *RWD : RWDefs) { if (RWD->getValueAsDef("SchedModel") == RWModelDef && RWModelDef->getValueAsBit("FullInstRWOverlapCheck")) { assert(!InstDefs.empty()); // Checked at function start.
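Nearly every hunk in this file applies one mechanical idiom: instead of copying the derived definitions into a mutable RecVec (and often re-sorting them with LessRecord()), iterate the keeper's already name-sorted, read-only view directly through const Record pointers. Below is a minimal, self-contained sketch of that idiom using mock Keeper/Record types; the names Keeper and add() are invented for illustration and are not LLVM's actual classes.

#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct Record { std::string Name; };

// Stand-in for RecordKeeper: each class name maps to a cache of records
// kept sorted by name, so callers never need to copy and re-sort.
class Keeper {
  std::map<std::string, std::vector<const Record *>> Defs;
public:
  void add(const std::string &Cls, const Record *R) {
    auto &V = Defs[Cls];
    V.insert(std::upper_bound(V.begin(), V.end(), R,
                              [](const Record *A, const Record *B) {
                                return A->Name < B->Name;
                              }),
             R);
  }
  // Callers get a read-only, pre-sorted view; no copy, no re-sort.
  const std::vector<const Record *> &
  getAllDerivedDefinitions(const std::string &Cls) const {
    static const std::vector<const Record *> Empty;
    auto It = Defs.find(Cls);
    return It == Defs.end() ? Empty : It->second;
  }
};

int main() {
  Record A{"WriteALU"}, L{"WriteLoad"};
  Keeper Records;
  Records.add("SchedWrite", &L);
  Records.add("SchedWrite", &A);
  // The new style: no mutable vector copy, const pointers only.
  for (const Record *R : Records.getAllDerivedDefinitions("SchedWrite"))
    std::cout << R->Name << '\n'; // prints WriteALU, then WriteLoad
}

Keeping the cache sorted at insertion time is what lets the patch delete the scattered llvm::sort(..., LessRecord()) calls without changing emitted output.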
@@ -1109,8 +1092,8 @@ void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) { SC.ProcIndices.push_back(0); // If we had an old class, copy its InstRWs to this new class. if (OldSCIdx) { - Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel"); - for (Record *OldRWDef : SchedClasses[OldSCIdx].InstRWs) { + const Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel"); + for (const Record *OldRWDef : SchedClasses[OldSCIdx].InstRWs) { if (OldRWDef->getValueAsDef("SchedModel") == RWModelDef) { assert(!InstDefs.empty()); // Checked at function start. PrintError( @@ -1188,12 +1171,10 @@ void CodeGenSchedModels::collectProcItins() { // Gather the read/write types for each itinerary class. void CodeGenSchedModels::collectProcItinRW() { - RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW"); - llvm::sort(ItinRWDefs, LessRecord()); - for (Record *RWDef : ItinRWDefs) { + for (const Record *RWDef : Records.getAllDerivedDefinitions("ItinRW")) { if (!RWDef->getValueInit("SchedModel")->isComplete()) PrintFatalError(RWDef->getLoc(), "SchedModel is undefined"); - Record *ModelDef = RWDef->getValueAsDef("SchedModel"); + const Record *ModelDef = RWDef->getValueAsDef("SchedModel"); ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef); if (I == ProcModelMap.end()) { PrintFatalError(RWDef->getLoc(), @@ -1262,7 +1243,7 @@ void CodeGenSchedModels::inferFromItinClass(Record *ItinClassDef, void CodeGenSchedModels::inferFromInstRWs(unsigned SCIdx) { for (unsigned I = 0, E = SchedClasses[SCIdx].InstRWs.size(); I != E; ++I) { assert(SchedClasses[SCIdx].InstRWs.size() == E && "InstRWs was mutated!"); - Record *Rec = SchedClasses[SCIdx].InstRWs[I]; + const Record *Rec = SchedClasses[SCIdx].InstRWs[I]; const std::vector<const Record *> *InstDefs = Sets.expand(Rec); ConstRecIter II = InstDefs->begin(), IE = InstDefs->end(); for (; II != IE; ++II) { @@ -1285,12 +1266,12 @@ namespace { // Helper for substituteVariantOperand. struct TransVariant { - Record *VarOrSeqDef; // Variant or sequence. + const Record *VarOrSeqDef; // Variant or sequence. unsigned RWIdx; // Index of this variant or sequence's matched type. unsigned ProcIdx; // Processor model index or zero for any. unsigned TransVecIdx; // Index into PredTransitions::TransVec.
- TransVariant(Record *def, unsigned rwi, unsigned pi, unsigned ti) + TransVariant(const Record *def, unsigned rwi, unsigned pi, unsigned ti) : VarOrSeqDef(def), RWIdx(rwi), ProcIdx(pi), TransVecIdx(ti) {} }; @@ -1299,7 +1280,7 @@ struct TransVariant { struct PredCheck { bool IsRead; unsigned RWIdx; - Record *Predicate; + const Record *Predicate; PredCheck(bool r, unsigned w, Record *p) : IsRead(r), RWIdx(w), Predicate(p) {} @@ -1440,7 +1421,7 @@ void PredTransitions::getIntersectingVariants( GenericRW = true; } } - for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end(); + for (ConstRecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end(); AI != AE; ++AI) { // If either the SchedAlias itself or the SchedReadWrite that it aliases // to is defined within a processor model, constrain all variants to @@ -1644,7 +1625,7 @@ static void addSequences(CodeGenSchedModels &SchedModels, } #ifndef NDEBUG -static void dumpRecVec(const RecVec &RV) { +static void dumpRecVec(const ConstRecVec &RV) { for (const Record *R : RV) dbgs() << R->getName() << ", "; } @@ -1653,7 +1634,7 @@ static void dumpRecVec(const RecVec &RV) { static void dumpTransition(const CodeGenSchedModels &SchedModels, const CodeGenSchedClass &FromSC, const CodeGenSchedTransition &SCTrans, - const RecVec &Preds) { + const ConstRecVec &Preds) { LLVM_DEBUG(dbgs() << "Adding transition from " << FromSC.Name << "(" << FromSC.Index << ") to " << SchedModels.getSchedClass(SCTrans.ToClassIdx).Name << "(" @@ -1690,7 +1671,7 @@ static void inferFromTransitions(ArrayRef<PredTransition> LastTransitions, OperReadsVariant, LastTransition.ProcIndex); // The final PredTerm is the unique set of predicates guarding the transition. - RecVec Preds; + ConstRecVec Preds; transform(LastTransition.PredTerm, std::back_inserter(Preds), [](const PredCheck &P) { return P.Predicate; }); Preds.erase(llvm::unique(Preds), Preds.end()); @@ -1781,7 +1762,7 @@ void CodeGenSchedModels::inferFromRW(ArrayRef<unsigned> OperWrites, // Check if any processor resource group contains all resource records in // SubUnits. bool CodeGenSchedModels::hasSuperGroup(RecVec &SubUnits, CodeGenProcModel &PM) { - for (Record *ProcResourceDef : PM.ProcResourceDefs) { + for (const Record *ProcResourceDef : PM.ProcResourceDefs) { if (!ProcResourceDef->isSubClassOf("ProcResGroup")) continue; RecVec SuperUnits = ProcResourceDef->getValueAsListOfDefs("Resources"); @@ -1827,10 +1808,8 @@ void CodeGenSchedModels::verifyProcResourceGroups(CodeGenProcModel &PM) { // Collect all the RegisterFile definitions available in this target. void CodeGenSchedModels::collectRegisterFiles() { - RecVec RegisterFileDefs = Records.getAllDerivedDefinitions("RegisterFile"); - // RegisterFiles is the vector of CodeGenRegisterFile. - for (Record *RF : RegisterFileDefs) { + for (const Record *RF : Records.getAllDerivedDefinitions("RegisterFile")) { // For each register file definition, construct a CodeGenRegisterFile object // and add it to the appropriate scheduling model. CodeGenProcModel &PM = getProcModel(RF->getValueAsDef("SchedModel")); @@ -1883,7 +1862,7 @@ void CodeGenSchedModels::collectProcResources() { // This class may have a default ReadWrite list which can be overridden by // InstRW definitions.
- for (Record *RW : SC.InstRWs) { + for (const Record *RW : SC.InstRWs) { Record *RWModelDef = RW->getValueAsDef("SchedModel"); unsigned PIdx = getProcModel(RWModelDef).Index; IdxVec Writes, Reads; @@ -1894,32 +1873,28 @@ void CodeGenSchedModels::collectProcResources() { collectRWResources(SC.Writes, SC.Reads, SC.ProcIndices); } // Add resources separately defined by each subtarget. - RecVec WRDefs = Records.getAllDerivedDefinitions("WriteRes"); - for (Record *WR : WRDefs) { - Record *ModelDef = WR->getValueAsDef("SchedModel"); + for (const Record *WR : Records.getAllDerivedDefinitions("WriteRes")) { + const Record *ModelDef = WR->getValueAsDef("SchedModel"); addWriteRes(WR, getProcModel(ModelDef).Index); } - RecVec SWRDefs = Records.getAllDerivedDefinitions("SchedWriteRes"); - for (Record *SWR : SWRDefs) { - Record *ModelDef = SWR->getValueAsDef("SchedModel"); + for (const Record *SWR : Records.getAllDerivedDefinitions("SchedWriteRes")) { + const Record *ModelDef = SWR->getValueAsDef("SchedModel"); addWriteRes(SWR, getProcModel(ModelDef).Index); } - RecVec RADefs = Records.getAllDerivedDefinitions("ReadAdvance"); - for (Record *RA : RADefs) { - Record *ModelDef = RA->getValueAsDef("SchedModel"); + for (const Record *RA : Records.getAllDerivedDefinitions("ReadAdvance")) { + const Record *ModelDef = RA->getValueAsDef("SchedModel"); addReadAdvance(RA, getProcModel(ModelDef).Index); } - RecVec SRADefs = Records.getAllDerivedDefinitions("SchedReadAdvance"); - for (Record *SRA : SRADefs) { + for (const Record *SRA : + Records.getAllDerivedDefinitions("SchedReadAdvance")) { if (SRA->getValueInit("SchedModel")->isComplete()) { - Record *ModelDef = SRA->getValueAsDef("SchedModel"); + const Record *ModelDef = SRA->getValueAsDef("SchedModel"); addReadAdvance(SRA, getProcModel(ModelDef).Index); } } // Add ProcResGroups that are defined within this processor model, which may // not be directly referenced but may directly specify a buffer size. - RecVec ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup"); - for (Record *PRG : ProcResGroups) { + for (const Record *PRG : Records.getAllDerivedDefinitions("ProcResGroup")) { if (!PRG->getValueInit("SchedModel")->isComplete()) continue; CodeGenProcModel &PM = getProcModel(PRG->getValueAsDef("SchedModel")); @@ -1927,7 +1902,8 @@ void CodeGenSchedModels::collectProcResources() { PM.ProcResourceDefs.push_back(PRG); } // Add ProcResourceUnits unconditionally. 
- for (Record *PRU : Records.getAllDerivedDefinitions("ProcResourceUnits")) { + for (const Record *PRU : + Records.getAllDerivedDefinitions("ProcResourceUnits")) { if (!PRU->getValueInit("SchedModel")->isComplete()) continue; CodeGenProcModel &PM = getProcModel(PRU->getValueAsDef("SchedModel")); @@ -1947,7 +1923,7 @@ void CodeGenSchedModels::collectProcResources() { else dbgs() << WriteResDef->getName() << " "; } dbgs() << "\nReadAdvanceDefs: "; - for (Record *ReadAdvanceDef + for (const Record *ReadAdvanceDef : PM.ReadAdvanceDefs) { if (ReadAdvanceDef->isSubClassOf("ReadAdvance")) dbgs() << ReadAdvanceDef->getValueAsDef("ReadType")->getName() @@ -1956,7 +1932,7 @@ void CodeGenSchedModels::collectProcResources() { dbgs() << ReadAdvanceDef->getName() << " "; } dbgs() << "\nProcResourceDefs: "; - for (Record *ProcResourceDef + for (const Record *ProcResourceDef : PM.ProcResourceDefs) { dbgs() << ProcResourceDef->getName() << " "; } dbgs() @@ -1998,7 +1974,7 @@ void CodeGenSchedModels::checkCompleteness() { SC.ItinClassDef->getName() != "NoItinerary") continue; - const RecVec &InstRWs = SC.InstRWs; + const ConstRecVec &InstRWs = SC.InstRWs; auto I = find_if(InstRWs, [&ProcModel](const Record *R) { return R->getValueAsDef("SchedModel") == ProcModel.ModelDef; }); @@ -2033,18 +2009,17 @@ void CodeGenSchedModels::collectItinProcResources(Record *ItinClassDef) { const CodeGenProcModel &PM = ProcModels[PIdx]; // For all ItinRW entries. bool HasMatch = false; - for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end(); II != IE; - ++II) { - RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses"); + for (const Record *R : PM.ItinRWDefs) { + RecVec Matched = R->getValueAsListOfDefs("MatchedItinClasses"); if (!llvm::is_contained(Matched, ItinClassDef)) continue; if (HasMatch) - PrintFatalError((*II)->getLoc(), + PrintFatalError(R->getLoc(), "Duplicate itinerary class " + ItinClassDef->getName() + " in ItinResources for " + PM.ModelName); HasMatch = true; IdxVec Writes, Reads; - findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads); + findRWs(R->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads); collectRWResources(Writes, Reads, PIdx); } } @@ -2092,17 +2067,17 @@ void CodeGenSchedModels::collectRWResources(ArrayRef<unsigned> Writes, } // Find the processor's resource units for this kind of resource. 
-Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind, - const CodeGenProcModel &PM, - ArrayRef<SMLoc> Loc) const { +const Record *CodeGenSchedModels::findProcResUnits(const Record *ProcResKind, + const CodeGenProcModel &PM, + ArrayRef<SMLoc> Loc) const { if (ProcResKind->isSubClassOf("ProcResourceUnits")) return ProcResKind; - Record *ProcUnitDef = nullptr; + const Record *ProcUnitDef = nullptr; assert(!ProcResourceDefs.empty()); assert(!ProcResGroups.empty()); - for (Record *ProcResDef : ProcResourceDefs) { + for (const Record *ProcResDef : ProcResourceDefs) { if (ProcResDef->getValueAsDef("Kind") == ProcResKind && ProcResDef->getValueAsDef("SchedModel") == PM.ModelDef) { if (ProcUnitDef) { @@ -2113,7 +2088,7 @@ Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind, ProcUnitDef = ProcResDef; } } - for (Record *ProcResGroup : ProcResGroups) { + for (const Record *ProcResGroup : ProcResGroups) { if (ProcResGroup == ProcResKind && ProcResGroup->getValueAsDef("SchedModel") == PM.ModelDef) { if (ProcUnitDef) { @@ -2132,11 +2107,11 @@ Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind, } // Iteratively add a resource and its super resources. -void CodeGenSchedModels::addProcResource(Record *ProcResKind, +void CodeGenSchedModels::addProcResource(const Record *ProcResKind, CodeGenProcModel &PM, ArrayRef<SMLoc> Loc) { while (true) { - Record *ProcResUnits = findProcResUnits(ProcResKind, PM, Loc); + const Record *ProcResUnits = findProcResUnits(ProcResKind, PM, Loc); // See if this ProcResource is already associated with this processor. if (is_contained(PM.ProcResourceDefs, ProcResUnits)) @@ -2154,23 +2129,24 @@ void CodeGenSchedModels::addProcResource(Record *ProcResKind, } // Add resources for a SchedWrite to this processor if they don't exist. -void CodeGenSchedModels::addWriteRes(Record *ProcWriteResDef, unsigned PIdx) { +void CodeGenSchedModels::addWriteRes(const Record *ProcWriteResDef, + unsigned PIdx) { assert(PIdx && "don't add resources to an invalid Processor model"); - RecVec &WRDefs = ProcModels[PIdx].WriteResDefs; + ConstRecVec &WRDefs = ProcModels[PIdx].WriteResDefs; if (is_contained(WRDefs, ProcWriteResDef)) return; WRDefs.push_back(ProcWriteResDef); // Visit ProcResourceKinds referenced by the newly discovered WriteRes. RecVec ProcResDefs = ProcWriteResDef->getValueAsListOfDefs("ProcResources"); - for (auto *ProcResDef : ProcResDefs) { + for (const Record *ProcResDef : ProcResDefs) { addProcResource(ProcResDef, ProcModels[PIdx], ProcWriteResDef->getLoc()); } } // Add resources for a ReadAdvance to this processor if they don't exist. 
-void CodeGenSchedModels::addReadAdvance(Record *ProcReadAdvanceDef, +void CodeGenSchedModels::addReadAdvance(const Record *ProcReadAdvanceDef, unsigned PIdx) { for (const Record *ValidWrite : ProcReadAdvanceDef->getValueAsListOfDefs("ValidWrites")) @@ -2181,14 +2157,14 @@ void CodeGenSchedModels::addReadAdvance(Record *ProcReadAdvanceDef, "any instruction (" + ValidWrite->getName() + ")"); - RecVec &RADefs = ProcModels[PIdx].ReadAdvanceDefs; + ConstRecVec &RADefs = ProcModels[PIdx].ReadAdvanceDefs; if (is_contained(RADefs, ProcReadAdvanceDef)) return; RADefs.push_back(ProcReadAdvanceDef); } -unsigned CodeGenProcModel::getProcResourceIdx(Record *PRDef) const { - RecIter PRPos = find(ProcResourceDefs, PRDef); +unsigned CodeGenProcModel::getProcResourceIdx(const Record *PRDef) const { + ConstRecIter PRPos = find(ProcResourceDefs, PRDef); if (PRPos == ProcResourceDefs.end()) PrintFatalError(PRDef->getLoc(), "ProcResource def is not included in " "the ProcResources list for " + @@ -2208,7 +2184,7 @@ bool CodeGenProcModel::isUnsupported(const CodeGenInstruction &Inst) const { return false; } -bool CodeGenProcModel::hasReadOfWrite(Record *WriteDef) const { +bool CodeGenProcModel::hasReadOfWrite(const Record *WriteDef) const { for (auto &RADef : ReadAdvanceDefs) { RecVec ValidWrites = RADef->getValueAsListOfDefs("ValidWrites"); if (is_contained(ValidWrites, WriteDef)) diff --git a/llvm/utils/TableGen/Common/CodeGenSchedule.h b/llvm/utils/TableGen/Common/CodeGenSchedule.h index 57a0986..ff85ac3 100644 --- a/llvm/utils/TableGen/Common/CodeGenSchedule.h +++ b/llvm/utils/TableGen/Common/CodeGenSchedule.h @@ -51,19 +51,19 @@ using IdxIter = IdxVec::const_iterator; struct CodeGenSchedRW { unsigned Index; std::string Name; - Record *TheDef; + const Record *TheDef; bool IsRead; bool IsAlias; bool HasVariants; bool IsVariadic; bool IsSequence; IdxVec Sequence; - RecVec Aliases; + ConstRecVec Aliases; CodeGenSchedRW() : Index(0), TheDef(nullptr), IsRead(false), IsAlias(false), HasVariants(false), IsVariadic(false), IsSequence(false) {} - CodeGenSchedRW(unsigned Idx, Record *Def) + CodeGenSchedRW(unsigned Idx, const Record *Def) : Index(Idx), TheDef(Def), IsAlias(false), IsVariadic(false) { Name = std::string(Def->getName()); IsRead = Def->isSubClassOf("SchedRead"); @@ -102,7 +102,7 @@ struct CodeGenSchedRW { struct CodeGenSchedTransition { unsigned ToClassIdx; unsigned ProcIndex; - RecVec PredTerm; + ConstRecVec PredTerm; }; /// Scheduling class. @@ -145,7 +145,7 @@ struct CodeGenSchedClass { // Instruction no longer mapped to this class by InstrClassMap. These // Instructions should be ignored by this class because they have been split // off to join another inferred class. - RecVec InstRWs; + ConstRecVec InstRWs; // InstRWs processor indices. Filled in inferFromInstRWs DenseSet<unsigned> InstRWProcIndices; @@ -189,14 +189,14 @@ struct CodeGenRegisterCost { /// stalls due to register pressure. 
struct CodeGenRegisterFile { std::string Name; - Record *RegisterFileDef; + const Record *RegisterFileDef; unsigned MaxMovesEliminatedPerCycle; bool AllowZeroMoveEliminationOnly; unsigned NumPhysRegs; std::vector<CodeGenRegisterCost> Costs; - CodeGenRegisterFile(StringRef name, Record *def, + CodeGenRegisterFile(StringRef name, const Record *def, unsigned MaxMoveElimPerCy = 0, bool AllowZeroMoveElimOnly = false) : Name(name), RegisterFileDef(def), @@ -223,8 +223,8 @@ struct CodeGenRegisterFile { struct CodeGenProcModel { unsigned Index; std::string ModelName; - Record *ModelDef; - Record *ItinsDef; + const Record *ModelDef; + const Record *ItinsDef; // Derived members... @@ -235,30 +235,31 @@ struct CodeGenProcModel { // Map itinerary classes to per-operand resources. // This list is empty if no ItinRW refers to this Processor. - RecVec ItinRWDefs; + ConstRecVec ItinRWDefs; // List of unsupported features. // This list is empty if the Processor has no UnsupportedFeatures. RecVec UnsupportedFeaturesDefs; // All read/write resources associated with this processor. - RecVec WriteResDefs; - RecVec ReadAdvanceDefs; + ConstRecVec WriteResDefs; + ConstRecVec ReadAdvanceDefs; // Per-operand machine model resources associated with this processor. - RecVec ProcResourceDefs; + ConstRecVec ProcResourceDefs; // List of Register Files. std::vector<CodeGenRegisterFile> RegisterFiles; // Optional Retire Control Unit definition. - Record *RetireControlUnit; + const Record *RetireControlUnit; // Load/Store queue descriptors. - Record *LoadQueue; - Record *StoreQueue; + const Record *LoadQueue; + const Record *StoreQueue; - CodeGenProcModel(unsigned Idx, std::string Name, Record *MDef, Record *IDef) + CodeGenProcModel(unsigned Idx, std::string Name, const Record *MDef, + const Record *IDef) : Index(Idx), ModelName(std::move(Name)), ModelDef(MDef), ItinsDef(IDef), RetireControlUnit(nullptr), LoadQueue(nullptr), StoreQueue(nullptr) {} @@ -275,12 +276,12 @@ struct CodeGenProcModel { !RegisterFiles.empty(); } - unsigned getProcResourceIdx(Record *PRDef) const; + unsigned getProcResourceIdx(const Record *PRDef) const; bool isUnsupported(const CodeGenInstruction &Inst) const; // Return true if the given write record is referenced by a ReadAdvance. - bool hasReadOfWrite(Record *WriteDef) const; + bool hasReadOfWrite(const Record *WriteDef) const; #ifndef NDEBUG void dump() const; @@ -421,7 +422,7 @@ using ProcModelMapTy = DenseMap<const Record *, unsigned>; /// Top level container for machine model data. class CodeGenSchedModels { - RecordKeeper &Records; + const RecordKeeper &Records; const CodeGenTarget &Target; // Map dag expressions to Instruction lists. @@ -443,8 +444,8 @@ class CodeGenSchedModels { // Any inferred SchedClass has an index greater than NumInstrSchedClasses. unsigned NumInstrSchedClasses; - RecVec ProcResourceDefs; - RecVec ProcResGroups; + ConstRecVec ProcResourceDefs; + ConstRecVec ProcResGroups; // Map each instruction to its unique SchedClass index considering the // combination of its itinerary class, SchedRW list, and InstRW records. @@ -455,7 +456,7 @@ class CodeGenSchedModels { std::vector<unsigned> getAllProcIndices() const; public: - CodeGenSchedModels(RecordKeeper &RK, const CodeGenTarget &TGT); + CodeGenSchedModels(const RecordKeeper &RK, const CodeGenTarget &TGT); // iterator access to the scheduling classes.
using class_iterator = std::vector<CodeGenSchedClass>::iterator; @@ -477,9 +478,9 @@ public: return make_range(classes_begin(), classes_begin() + NumInstrSchedClasses); } - Record *getModelOrItinDef(Record *ProcDef) const { - Record *ModelDef = ProcDef->getValueAsDef("SchedModel"); - Record *ItinsDef = ProcDef->getValueAsDef("ProcItin"); + const Record *getModelOrItinDef(const Record *ProcDef) const { + const Record *ModelDef = ProcDef->getValueAsDef("SchedModel"); + const Record *ItinsDef = ProcDef->getValueAsDef("ProcItin"); if (!ItinsDef->getValueAsListOfDefs("IID").empty()) { assert(ModelDef->getValueAsBit("NoModel") && "Itineraries must be defined within SchedMachineModel"); @@ -488,19 +489,19 @@ public: return ModelDef; } - const CodeGenProcModel &getModelForProc(Record *ProcDef) const { - Record *ModelDef = getModelOrItinDef(ProcDef); + const CodeGenProcModel &getModelForProc(const Record *ProcDef) const { + const Record *ModelDef = getModelOrItinDef(ProcDef); ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef); assert(I != ProcModelMap.end() && "missing machine model"); return ProcModels[I->second]; } - CodeGenProcModel &getProcModel(Record *ModelDef) { + CodeGenProcModel &getProcModel(const Record *ModelDef) { ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef); assert(I != ProcModelMap.end() && "missing machine model"); return ProcModels[I->second]; } - const CodeGenProcModel &getProcModel(Record *ModelDef) const { + const CodeGenProcModel &getProcModel(const Record *ModelDef) const { return const_cast<CodeGenSchedModels *>(this)->getProcModel(ModelDef); } @@ -575,8 +576,9 @@ public: unsigned findOrInsertRW(ArrayRef<unsigned> Seq, bool IsRead); - Record *findProcResUnits(Record *ProcResKind, const CodeGenProcModel &PM, - ArrayRef<SMLoc> Loc) const; + const Record *findProcResUnits(const Record *ProcResKind, + const CodeGenProcModel &PM, + ArrayRef<SMLoc> Loc) const; ArrayRef<STIPredicateFunction> getSTIPredicates() const { return STIPredicates; @@ -586,7 +588,7 @@ private: void collectProcModels(); // Initialize a new processor model if it is unique. - void addProcModel(Record *ProcDef); + void addProcModel(const Record *ProcDef); void collectSchedRW(); @@ -605,7 +607,7 @@ private: ArrayRef<unsigned> OperWrites, ArrayRef<unsigned> OperReads); std::string createSchedClassName(const ConstRecVec &InstDefs); - void createInstRWClass(Record *InstRWDef); + void createInstRWClass(const Record *InstRWDef); void collectProcItins(); @@ -643,12 +645,12 @@ private: void collectRWResources(ArrayRef<unsigned> Writes, ArrayRef<unsigned> Reads, ArrayRef<unsigned> ProcIndices); - void addProcResource(Record *ProcResourceKind, CodeGenProcModel &PM, + void addProcResource(const Record *ProcResourceKind, CodeGenProcModel &PM, ArrayRef<SMLoc> Loc); - void addWriteRes(Record *ProcWriteResDef, unsigned PIdx); + void addWriteRes(const Record *ProcWriteResDef, unsigned PIdx); - void addReadAdvance(Record *ProcReadAdvanceDef, unsigned PIdx); + void addReadAdvance(const Record *ProcReadAdvanceDef, unsigned PIdx); }; } // namespace llvm diff --git a/llvm/utils/TableGen/Common/CodeGenTarget.cpp b/llvm/utils/TableGen/Common/CodeGenTarget.cpp index 3cd7e4e..69d2c70 100644 --- a/llvm/utils/TableGen/Common/CodeGenTarget.cpp +++ b/llvm/utils/TableGen/Common/CodeGenTarget.cpp @@ -85,9 +85,9 @@ std::string llvm::getQualifiedName(const Record *R) { /// getTarget - Return the current instance of the Target class. 
/// -CodeGenTarget::CodeGenTarget(RecordKeeper &records) +CodeGenTarget::CodeGenTarget(const RecordKeeper &records) : Records(records), CGH(records), Intrinsics(records) { - std::vector<Record *> Targets = Records.getAllDerivedDefinitions("Target"); + ArrayRef<const Record *> Targets = Records.getAllDerivedDefinitions("Target"); if (Targets.size() == 0) PrintFatalError("No 'Target' subclasses defined!"); if (Targets.size() != 1) @@ -223,11 +223,6 @@ std::optional<CodeGenRegisterClass *> CodeGenTarget::getSuperRegForSubReg( return Candidates[0]; } -void CodeGenTarget::ReadRegAltNameIndices() const { - RegAltNameIndices = Records.getAllDerivedDefinitions("RegAltNameIndex"); - llvm::sort(RegAltNameIndices, LessRecord()); -} - /// getRegisterByName - If there is a register with the specific AsmName, /// return it. const CodeGenRegister *CodeGenTarget::getRegisterByName(StringRef Name) const { @@ -271,12 +266,13 @@ CodeGenSchedModels &CodeGenTarget::getSchedModels() const { } void CodeGenTarget::ReadInstructions() const { - std::vector<Record *> Insts = Records.getAllDerivedDefinitions("Instruction"); + ArrayRef<const Record *> Insts = + Records.getAllDerivedDefinitions("Instruction"); if (Insts.size() <= 2) PrintFatalError("No 'Instruction' subclasses defined!"); // Parse the instructions defined in the .td file. - for (Record *R : Insts) { + for (const Record *R : Insts) { Instructions[R] = std::make_unique<CodeGenInstruction>(R); if (Instructions[R]->isVariableLengthEncoding()) HasVariableLengthEncodings = true; @@ -286,7 +282,7 @@ void CodeGenTarget::ReadInstructions() const { static const CodeGenInstruction *GetInstByName( const char *Name, const DenseMap<const Record *, std::unique_ptr<CodeGenInstruction>> &Insts, - RecordKeeper &Records) { + const RecordKeeper &Records) { const Record *Rec = Records.getDef(Name); const auto I = Insts.find(Rec); @@ -358,9 +354,8 @@ void CodeGenTarget::reverseBitsForLittleEndianEncoding() { if (!isLittleEndianEncoding()) return; - std::vector<Record *> Insts = - Records.getAllDerivedDefinitions("InstructionEncoding"); - for (Record *R : Insts) { + for (const Record *R : + Records.getAllDerivedDefinitions("InstructionEncoding")) { if (R->getValueAsString("Namespace") == "TargetOpcode" || R->getValueAsBit("isPseudo")) continue; @@ -383,11 +378,15 @@ void CodeGenTarget::reverseBitsForLittleEndianEncoding() { NewBits[middle] = BI->getBit(middle); } - BitsInit *NewBI = BitsInit::get(Records, NewBits); + RecordKeeper &MutableRC = const_cast<RecordKeeper &>(Records); + BitsInit *NewBI = BitsInit::get(MutableRC, NewBits); - // Update the bits in reversed order so that emitInstrOpBits will get the - // correct endianness. - R->getValue("Inst")->setValue(NewBI); + // Update the bits in reversed order so that emitters will get the correct + // endianness. + // FIXME: Eliminate mutation of TG records by creating a helper function + // to reverse bits and maintain a cache instead of mutating records. 
+ Record *MutableR = const_cast<Record *>(R); + MutableR->getValue("Inst")->setValue(NewBI); } } @@ -403,11 +402,11 @@ bool CodeGenTarget::guessInstructionProperties() const { //===----------------------------------------------------------------------===// // ComplexPattern implementation // -ComplexPattern::ComplexPattern(Record *R) { +ComplexPattern::ComplexPattern(const Record *R) { Ty = R->getValueAsDef("Ty"); NumOperands = R->getValueAsInt("NumOperands"); SelectFunc = std::string(R->getValueAsString("SelectFunc")); - RootNodes = R->getValueAsListOfDefs("RootNodes"); + RootNodes = R->getValueAsListOfConstDefs("RootNodes"); // FIXME: This is a hack to statically increase the priority of patterns which // map a sub-dag to a complex pattern. e.g. favors LEA over ADD. To get best diff --git a/llvm/utils/TableGen/Common/CodeGenTarget.h b/llvm/utils/TableGen/Common/CodeGenTarget.h index 79001a2..225bdd9 100644 --- a/llvm/utils/TableGen/Common/CodeGenTarget.h +++ b/llvm/utils/TableGen/Common/CodeGenTarget.h @@ -56,19 +56,18 @@ std::string getQualifiedName(const Record *R); /// CodeGenTarget - This class corresponds to the Target class in the .td files. /// class CodeGenTarget { - RecordKeeper &Records; - Record *TargetRec; + const RecordKeeper &Records; + const Record *TargetRec; mutable DenseMap<const Record *, std::unique_ptr<CodeGenInstruction>> Instructions; mutable std::unique_ptr<CodeGenRegBank> RegBank; - mutable std::vector<Record *> RegAltNameIndices; + mutable ArrayRef<const Record *> RegAltNameIndices; mutable SmallVector<ValueTypeByHwMode, 8> LegalValueTypes; CodeGenHwModes CGH; - std::vector<Record *> MacroFusions; + ArrayRef<const Record *> MacroFusions; mutable bool HasVariableLengthEncodings = false; - void ReadRegAltNameIndices() const; void ReadInstructions() const; void ReadLegalValueTypes() const; @@ -81,10 +80,10 @@ class CodeGenTarget { mutable unsigned NumPseudoInstructions = 0; public: - CodeGenTarget(RecordKeeper &Records); + CodeGenTarget(const RecordKeeper &Records); ~CodeGenTarget(); - Record *getTargetRecord() const { return TargetRec; } + const Record *getTargetRecord() const { return TargetRec; } StringRef getName() const; /// getInstNamespace - Return the target-specific instruction namespace. @@ -135,9 +134,9 @@ public: /// getRegisterByName - If there is a register with the specific AsmName, /// return it.
const CodeGenRegister *getRegisterByName(StringRef Name) const; - const std::vector<Record *> &getRegAltNameIndices() const { + ArrayRef<const Record *> getRegAltNameIndices() const { if (RegAltNameIndices.empty()) - ReadRegAltNameIndices(); + RegAltNameIndices = Records.getAllDerivedDefinitions("RegAltNameIndex"); return RegAltNameIndices; } @@ -159,7 +158,7 @@ public: bool hasMacroFusion() const { return !MacroFusions.empty(); } - const std::vector<Record *> getMacroFusions() const { return MacroFusions; } + ArrayRef<const Record *> getMacroFusions() const { return MacroFusions; } private: DenseMap<const Record *, std::unique_ptr<CodeGenInstruction>> & @@ -239,20 +238,20 @@ private: /// ComplexPattern - ComplexPattern info, corresponding to the ComplexPattern /// tablegen class in TargetSelectionDAG.td class ComplexPattern { - Record *Ty; + const Record *Ty; unsigned NumOperands; std::string SelectFunc; - std::vector<Record *> RootNodes; + std::vector<const Record *> RootNodes; unsigned Properties; // Node properties unsigned Complexity; public: - ComplexPattern(Record *R); + ComplexPattern(const Record *R); - Record *getValueType() const { return Ty; } + const Record *getValueType() const { return Ty; } unsigned getNumOperands() const { return NumOperands; } const std::string &getSelectFunc() const { return SelectFunc; } - const std::vector<Record *> &getRootNodes() const { return RootNodes; } + const ArrayRef<const Record *> getRootNodes() const { return RootNodes; } bool hasProperty(enum SDNP Prop) const { return Properties & (1 << Prop); } unsigned getComplexity() const { return Complexity; } }; diff --git a/llvm/utils/TableGen/Common/Utils.cpp b/llvm/utils/TableGen/Common/Utils.cpp index 29b5120..b7a8f4d 100644 --- a/llvm/utils/TableGen/Common/Utils.cpp +++ b/llvm/utils/TableGen/Common/Utils.cpp @@ -27,7 +27,7 @@ struct LessRecordFieldNameAndID { /// Sort an array of Records on the "Name" field, and check for records with /// duplicate "Name" field. If duplicates are found, report a fatal error. -void llvm::sortAndReportDuplicates(MutableArrayRef<Record *> Records, +void llvm::sortAndReportDuplicates(MutableArrayRef<const Record *> Records, StringRef ObjectName) { llvm::sort(Records, LessRecordFieldNameAndID()); diff --git a/llvm/utils/TableGen/Common/Utils.h b/llvm/utils/TableGen/Common/Utils.h index 522f541c..bc3e8eb 100644 --- a/llvm/utils/TableGen/Common/Utils.h +++ b/llvm/utils/TableGen/Common/Utils.h @@ -17,7 +17,7 @@ class Record; /// Sort an array of Records on the "Name" field, and check for records with /// duplicate "Name" field. If duplicates are found, report a fatal error. 
-void sortAndReportDuplicates(MutableArrayRef<Record *> Records, +void sortAndReportDuplicates(MutableArrayRef<const Record *> Records, StringRef ObjectName); } // namespace llvm diff --git a/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp b/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp index 96a40f0..06eb1f7 100644 --- a/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp +++ b/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp @@ -1214,7 +1214,7 @@ void MatcherTableEmitter::EmitPredicateFunctions(raw_ostream &OS) { const CodeGenDAGPatterns::NodeXForm &Entry = CGP.getSDNodeTransform(NodeXForms[i]); - Record *SDNode = Entry.first; + const Record *SDNode = Entry.first; const std::string &Code = Entry.second; OS << " case " << i << ": { "; diff --git a/llvm/utils/TableGen/DAGISelMatcherGen.cpp b/llvm/utils/TableGen/DAGISelMatcherGen.cpp index bb8f4dc..5cb393a 100644 --- a/llvm/utils/TableGen/DAGISelMatcherGen.cpp +++ b/llvm/utils/TableGen/DAGISelMatcherGen.cpp @@ -555,7 +555,7 @@ bool MatcherGen::EmitMatcherCode(unsigned Variant) { // check. if (const ComplexPattern *CP = Pattern.getSrcPattern().getComplexPatternInfo(CGP)) { - const std::vector<Record *> &OpNodes = CP->getRootNodes(); + ArrayRef<const Record *> OpNodes = CP->getRootNodes(); assert(!OpNodes.empty() && "Complex Pattern must specify what it can match"); if (Variant >= OpNodes.size()) diff --git a/llvm/utils/TableGen/DFAPacketizerEmitter.cpp b/llvm/utils/TableGen/DFAPacketizerEmitter.cpp index 3c74df0..55cb39c 100644 --- a/llvm/utils/TableGen/DFAPacketizerEmitter.cpp +++ b/llvm/utils/TableGen/DFAPacketizerEmitter.cpp @@ -100,13 +100,13 @@ int DFAPacketizerEmitter::collectAllFuncUnits( LLVM_DEBUG(dbgs() << "collectAllFuncUnits"); LLVM_DEBUG(dbgs() << " (" << ProcModels.size() << " itineraries)\n"); - std::set<Record *> ProcItinList; + std::set<const Record *> ProcItinList; for (const CodeGenProcModel *Model : ProcModels) ProcItinList.insert(Model->ItinsDef); int totalFUs = 0; // Parse functional units for all the itineraries. - for (Record *Proc : ProcItinList) { + for (const Record *Proc : ProcItinList) { std::vector<Record *> FUs = Proc->getValueAsListOfDefs("FU"); LLVM_DEBUG(dbgs() << " FU:" diff --git a/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/llvm/utils/TableGen/RegisterInfoEmitter.cpp index d1c21ba..e076832 100644 --- a/llvm/utils/TableGen/RegisterInfoEmitter.cpp +++ b/llvm/utils/TableGen/RegisterInfoEmitter.cpp @@ -152,8 +152,7 @@ void RegisterInfoEmitter::runEnums(raw_ostream &OS, CodeGenTarget &Target, OS << "} // end namespace " << Namespace << "\n\n"; } - const std::vector<Record *> &RegAltNameIndices = - Target.getRegAltNameIndices(); + ArrayRef<const Record *> RegAltNameIndices = Target.getRegAltNameIndices(); // If the only definition is the default NoRegAltName, we don't need to // emit anything. if (RegAltNameIndices.size() > 1) { diff --git a/llvm/utils/TableGen/SubtargetEmitter.cpp b/llvm/utils/TableGen/SubtargetEmitter.cpp index 7ae61cb..394e2eb 100644 --- a/llvm/utils/TableGen/SubtargetEmitter.cpp +++ b/llvm/utils/TableGen/SubtargetEmitter.cpp @@ -43,6 +43,8 @@ using namespace llvm; namespace { +using FeatureMapTy = DenseMap<const Record *, unsigned>; + /// Sorting predicate to sort record pointers by their /// FieldName field. 
struct LessRecordFieldFieldName { @@ -81,22 +83,22 @@ class SubtargetEmitter { }; CodeGenTarget TGT; - RecordKeeper &Records; + const RecordKeeper &Records; CodeGenSchedModels &SchedModels; std::string Target; - void Enumeration(raw_ostream &OS, DenseMap<Record *, unsigned> &FeatureMap); + FeatureMapTy Enumeration(raw_ostream &OS); void EmitSubtargetInfoMacroCalls(raw_ostream &OS); - unsigned FeatureKeyValues(raw_ostream &OS, - const DenseMap<Record *, unsigned> &FeatureMap); - unsigned CPUKeyValues(raw_ostream &OS, - const DenseMap<Record *, unsigned> &FeatureMap); - void FormItineraryStageString(const std::string &Names, Record *ItinData, - std::string &ItinString, unsigned &NStages); - void FormItineraryOperandCycleString(Record *ItinData, + unsigned FeatureKeyValues(raw_ostream &OS, const FeatureMapTy &FeatureMap); + unsigned CPUKeyValues(raw_ostream &OS, const FeatureMapTy &FeatureMap); + void FormItineraryStageString(const std::string &Names, + const Record *ItinData, std::string &ItinString, + unsigned &NStages); + void FormItineraryOperandCycleString(const Record *ItinData, std::string &ItinString, unsigned &NOperandCycles); - void FormItineraryBypassString(const std::string &Names, Record *ItinData, + void FormItineraryBypassString(const std::string &Names, + const Record *ItinData, std::string &ItinString, unsigned NOperandCycles); void EmitStageAndOperandCycleData( @@ -115,11 +117,12 @@ class SubtargetEmitter { raw_ostream &OS); void EmitProcessorResources(const CodeGenProcModel &ProcModel, raw_ostream &OS); - Record *FindWriteResources(const CodeGenSchedRW &SchedWrite, - const CodeGenProcModel &ProcModel); - Record *FindReadAdvance(const CodeGenSchedRW &SchedRead, - const CodeGenProcModel &ProcModel); - void ExpandProcResources(RecVec &PRVec, std::vector<int64_t> &ReleaseAtCycles, + const Record *FindWriteResources(const CodeGenSchedRW &SchedWrite, + const CodeGenProcModel &ProcModel); + const Record *FindReadAdvance(const CodeGenSchedRW &SchedRead, + const CodeGenProcModel &ProcModel); + void ExpandProcResources(ConstRecVec &PRVec, + std::vector<int64_t> &ReleaseAtCycles, std::vector<int64_t> &AcquireAtCycles, const CodeGenProcModel &ProcModel); void GenSchedClassTables(const CodeGenProcModel &ProcModel, @@ -138,7 +141,7 @@ class SubtargetEmitter { void ParseFeaturesFunction(raw_ostream &OS); public: - SubtargetEmitter(RecordKeeper &R) + SubtargetEmitter(const RecordKeeper &R) : TGT(R), Records(R), SchedModels(TGT.getSchedModels()), Target(TGT.getName()) {} @@ -150,16 +153,13 @@ public: // // Enumeration - Emit the specified class as an enumeration. // -void SubtargetEmitter::Enumeration(raw_ostream &OS, - DenseMap<Record *, unsigned> &FeatureMap) { - // Get all records of class and sort - std::vector<Record *> DefList = +FeatureMapTy SubtargetEmitter::Enumeration(raw_ostream &OS) { + ArrayRef<const Record *> DefList = Records.getAllDerivedDefinitions("SubtargetFeature"); - llvm::sort(DefList, LessRecord()); unsigned N = DefList.size(); if (N == 0) - return; + return FeatureMapTy(); if (N + 1 > MAX_SUBTARGET_FEATURES) PrintFatalError( "Too many subtarget features! Bump MAX_SUBTARGET_FEATURES."); @@ -169,10 +169,11 @@ void SubtargetEmitter::Enumeration(raw_ostream &OS, // Open enumeration. 
OS << "enum {\n"; + FeatureMapTy FeatureMap; // For each record for (unsigned i = 0; i < N; ++i) { // Next record - Record *Def = DefList[i]; + const Record *Def = DefList[i]; // Get and emit name OS << " " << Def->getName() << " = " << i << ",\n"; @@ -187,10 +188,12 @@ void SubtargetEmitter::Enumeration(raw_ostream &OS, // Close enumeration and namespace OS << "};\n"; OS << "} // end namespace " << Target << "\n"; + return FeatureMap; } -static void printFeatureMask(raw_ostream &OS, RecVec &FeatureList, - const DenseMap<Record *, unsigned> &FeatureMap) { +static void printFeatureMask(raw_ostream &OS, + ArrayRef<const Record *> FeatureList, + const FeatureMapTy &FeatureMap) { std::array<uint64_t, MAX_SUBTARGET_WORDS> Mask = {}; for (const Record *Feature : FeatureList) { unsigned Bit = FeatureMap.lookup(Feature); @@ -211,7 +214,7 @@ static void printFeatureMask(raw_ostream &OS, RecVec &FeatureList, void SubtargetEmitter::EmitSubtargetInfoMacroCalls(raw_ostream &OS) { OS << "\n#ifdef GET_SUBTARGETINFO_MACRO\n"; - std::vector<Record *> FeatureList = + std::vector<const Record *> FeatureList = Records.getAllDerivedDefinitions("SubtargetFeature"); llvm::sort(FeatureList, LessRecordFieldFieldName()); @@ -249,9 +252,9 @@ void SubtargetEmitter::EmitSubtargetInfoMacroCalls(raw_ostream &OS) { // FeatureKeyValues - Emit data of all the subtarget features. Used by the // command line. // -unsigned SubtargetEmitter::FeatureKeyValues( - raw_ostream &OS, const DenseMap<Record *, unsigned> &FeatureMap) { - std::vector<Record *> FeatureList = +unsigned SubtargetEmitter::FeatureKeyValues(raw_ostream &OS, + const FeatureMapTy &FeatureMap) { + std::vector<const Record *> FeatureList = Records.getAllDerivedDefinitions("SubtargetFeature"); // Remove features with empty name. @@ -298,11 +301,10 @@ unsigned SubtargetEmitter::FeatureKeyValues( // CPUKeyValues - Emit data of all the subtarget processors. Used by command // line. // -unsigned -SubtargetEmitter::CPUKeyValues(raw_ostream &OS, - const DenseMap<Record *, unsigned> &FeatureMap) { +unsigned SubtargetEmitter::CPUKeyValues(raw_ostream &OS, + const FeatureMapTy &FeatureMap) { // Gather and sort processor information - std::vector<Record *> ProcessorList = + std::vector<const Record *> ProcessorList = Records.getAllDerivedDefinitions("Processor"); llvm::sort(ProcessorList, LessRecordFieldName()); @@ -316,7 +318,7 @@ SubtargetEmitter::CPUKeyValues(raw_ostream &OS, << "extern const llvm::SubtargetSubTypeKV " << Target << "SubTypeKV[] = {\n"; - for (Record *Processor : ProcessorList) { + for (const Record *Processor : ProcessorList) { StringRef Name = Processor->getValueAsString("Name"); RecVec FeatureList = Processor->getValueAsListOfDefs("Features"); RecVec TuneFeatureList = Processor->getValueAsListOfDefs("TuneFeatures"); @@ -347,11 +349,11 @@ SubtargetEmitter::CPUKeyValues(raw_ostream &OS, // of stages. // void SubtargetEmitter::FormItineraryStageString(const std::string &Name, - Record *ItinData, + const Record *ItinData, std::string &ItinString, unsigned &NStages) { // Get stages list - RecVec StageList = ItinData->getValueAsListOfDefs("Stages"); + ConstRecVec StageList = ItinData->getValueAsListOfConstDefs("Stages"); // For each stage unsigned N = NStages = StageList.size(); @@ -393,7 +395,7 @@ void SubtargetEmitter::FormItineraryStageString(const std::string &Name, // number of operands that have cycles specified.
// void SubtargetEmitter::FormItineraryOperandCycleString( - Record *ItinData, std::string &ItinString, unsigned &NOperandCycles) { + const Record *ItinData, std::string &ItinString, unsigned &NOperandCycles) { // Get operand cycle list std::vector<int64_t> OperandCycleList = ItinData->getValueAsListOfInts("OperandCycles"); @@ -409,10 +411,10 @@ void SubtargetEmitter::FormItineraryOperandCycleString( } void SubtargetEmitter::FormItineraryBypassString(const std::string &Name, - Record *ItinData, + const Record *ItinData, std::string &ItinString, unsigned NOperandCycles) { - RecVec BypassList = ItinData->getValueAsListOfDefs("Bypasses"); + ConstRecVec BypassList = ItinData->getValueAsListOfConstDefs("Bypasses"); unsigned N = BypassList.size(); unsigned i = 0; ListSeparator LS; @@ -434,7 +436,7 @@ void SubtargetEmitter::FormItineraryBypassString(const std::string &Name, void SubtargetEmitter::EmitStageAndOperandCycleData( raw_ostream &OS, std::vector<std::vector<InstrItinerary>> &ProcItinLists) { // Multiple processor models may share an itinerary record. Emit it once. - SmallPtrSet<Record *, 8> ItinsDefSet; + SmallPtrSet<const Record *, 8> ItinsDefSet; // Emit functional units for all the itineraries. for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) { @@ -509,7 +511,7 @@ void SubtargetEmitter::EmitStageAndOperandCycleData( SchedClassIdx < SchedClassEnd; ++SchedClassIdx) { // Next itinerary data - Record *ItinData = ProcModel.ItinDefList[SchedClassIdx]; + const Record *ItinData = ProcModel.ItinDefList[SchedClassIdx]; // Get string and stage count std::string ItinStageString; @@ -610,7 +612,7 @@ void SubtargetEmitter::EmitStageAndOperandCycleData( void SubtargetEmitter::EmitItineraries( raw_ostream &OS, std::vector<std::vector<InstrItinerary>> &ProcItinLists) { // Multiple processor models may share an itinerary record. Emit it once. 
- SmallPtrSet<Record *, 8> ItinsDefSet; + SmallPtrSet<const Record *, 8> ItinsDefSet; // For each processor's machine model std::vector<std::vector<InstrItinerary>>::iterator ProcItinListsIter = @@ -619,7 +621,7 @@ void SubtargetEmitter::EmitItineraries( PE = SchedModels.procModelEnd(); PI != PE; ++PI, ++ProcItinListsIter) { - Record *ItinsDef = PI->ItinsDef; + const Record *ItinsDef = PI->ItinsDef; if (!ItinsDefSet.insert(ItinsDef).second) continue; @@ -677,12 +679,12 @@ void SubtargetEmitter::EmitProcessorResourceSubUnits( << " 0, // Invalid\n"; for (unsigned i = 0, e = ProcModel.ProcResourceDefs.size(); i < e; ++i) { - Record *PRDef = ProcModel.ProcResourceDefs[i]; + const Record *PRDef = ProcModel.ProcResourceDefs[i]; if (!PRDef->isSubClassOf("ProcResGroup")) continue; RecVec ResUnits = PRDef->getValueAsListOfDefs("Resources"); - for (Record *RUDef : ResUnits) { - Record *const RU = + for (const Record *RUDef : ResUnits) { + const Record *RU = SchedModels.findProcResUnits(RUDef, ProcModel, PRDef->getLoc()); for (unsigned J = 0; J < RU->getValueAsInt("NumUnits"); ++J) { OS << " " << ProcModel.getProcResourceIdx(RU) << ", "; @@ -696,7 +698,7 @@ void SubtargetEmitter::EmitProcessorResourceSubUnits( static void EmitRetireControlUnitInfo(const CodeGenProcModel &ProcModel, raw_ostream &OS) { int64_t ReorderBufferSize = 0, MaxRetirePerCycle = 0; - if (Record *RCU = ProcModel.RetireControlUnit) { + if (const Record *RCU = ProcModel.RetireControlUnit) { ReorderBufferSize = std::max(ReorderBufferSize, RCU->getValueAsInt("ReorderBufferSize")); MaxRetirePerCycle = @@ -744,7 +746,7 @@ SubtargetEmitter::EmitRegisterFileTables(const CodeGenProcModel &ProcModel, // Add entries to the cost table. for (const CodeGenRegisterCost &RC : RF.Costs) { OS << " { "; - Record *Rec = RC.RCDef; + const Record *Rec = RC.RCDef; if (Rec->getValue("Namespace")) OS << Rec->getValueAsString("Namespace") << "::"; OS << Rec->getName() << "RegClassID, " << RC.Cost << ", " @@ -832,16 +834,16 @@ void SubtargetEmitter::EmitProcessorResources(const CodeGenProcModel &ProcModel, unsigned SubUnitsOffset = 1; for (unsigned i = 0, e = ProcModel.ProcResourceDefs.size(); i < e; ++i) { - Record *PRDef = ProcModel.ProcResourceDefs[i]; + const Record *PRDef = ProcModel.ProcResourceDefs[i]; - Record *SuperDef = nullptr; + const Record *SuperDef = nullptr; unsigned SuperIdx = 0; unsigned NumUnits = 0; const unsigned SubUnitsBeginOffset = SubUnitsOffset; int BufferSize = PRDef->getValueAsInt("BufferSize"); if (PRDef->isSubClassOf("ProcResGroup")) { RecVec ResUnits = PRDef->getValueAsListOfDefs("Resources"); - for (Record *RU : ResUnits) { + for (const Record *RU : ResUnits) { NumUnits += RU->getValueAsInt("NumUnits"); SubUnitsOffset += RU->getValueAsInt("NumUnits"); } @@ -875,7 +877,7 @@ void SubtargetEmitter::EmitProcessorResources(const CodeGenProcModel &ProcModel, // Find the WriteRes Record that defines processor resources for this // SchedWrite. 
-Record * +const Record * SubtargetEmitter::FindWriteResources(const CodeGenSchedRW &SchedWrite, const CodeGenProcModel &ProcModel) { @@ -884,12 +886,12 @@ SubtargetEmitter::FindWriteResources(const CodeGenSchedRW &SchedWrite, if (SchedWrite.TheDef->isSubClassOf("SchedWriteRes")) return SchedWrite.TheDef; - Record *AliasDef = nullptr; - for (Record *A : SchedWrite.Aliases) { + const Record *AliasDef = nullptr; + for (const Record *A : SchedWrite.Aliases) { const CodeGenSchedRW &AliasRW = SchedModels.getSchedRW(A->getValueAsDef("AliasRW")); if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) { - Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel"); + const Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel"); if (&SchedModels.getProcModel(ModelDef) != &ProcModel) continue; } @@ -905,11 +907,11 @@ SubtargetEmitter::FindWriteResources(const CodeGenSchedRW &SchedWrite, return AliasDef; // Check this processor's list of write resources. - Record *ResDef = nullptr; - for (Record *WR : ProcModel.WriteResDefs) { + const Record *ResDef = nullptr; + for (const Record *WR : ProcModel.WriteResDefs) { if (!WR->isSubClassOf("WriteRes")) continue; - Record *WRDef = WR->getValueAsDef("WriteType"); + const Record *WRDef = WR->getValueAsDef("WriteType"); if (AliasDef == WRDef || SchedWrite.TheDef == WRDef) { if (ResDef) { PrintFatalError(WR->getLoc(), "Resources are defined for both " @@ -936,19 +938,20 @@ SubtargetEmitter::FindWriteResources(const CodeGenSchedRW &SchedWrite, /// Find the ReadAdvance record for the given SchedRead on this processor or /// return NULL. -Record *SubtargetEmitter::FindReadAdvance(const CodeGenSchedRW &SchedRead, - const CodeGenProcModel &ProcModel) { +const Record * +SubtargetEmitter::FindReadAdvance(const CodeGenSchedRW &SchedRead, + const CodeGenProcModel &ProcModel) { // Check for SchedReads that directly specify a ReadAdvance. if (SchedRead.TheDef->isSubClassOf("SchedReadAdvance")) return SchedRead.TheDef; // Check this processor's list of aliases for SchedRead. - Record *AliasDef = nullptr; - for (Record *A : SchedRead.Aliases) { + const Record *AliasDef = nullptr; + for (const Record *A : SchedRead.Aliases) { const CodeGenSchedRW &AliasRW = SchedModels.getSchedRW(A->getValueAsDef("AliasRW")); if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) { - Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel"); + const Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel"); if (&SchedModels.getProcModel(ModelDef) != &ProcModel) continue; } @@ -964,11 +967,11 @@ Record *SubtargetEmitter::FindReadAdvance(const CodeGenSchedRW &SchedRead, return AliasDef; // Check this processor's ReadAdvanceList. - Record *ResDef = nullptr; - for (Record *RA : ProcModel.ReadAdvanceDefs) { + const Record *ResDef = nullptr; + for (const Record *RA : ProcModel.ReadAdvanceDefs) { if (!RA->isSubClassOf("ReadAdvance")) continue; - Record *RADef = RA->getValueAsDef("ReadType"); + const Record *RADef = RA->getValueAsDef("ReadType"); if (AliasDef == RADef || SchedRead.TheDef == RADef) { if (ResDef) { PrintFatalError(RA->getLoc(), "Resources are defined for both " @@ -996,25 +999,25 @@ Record *SubtargetEmitter::FindReadAdvance(const CodeGenSchedRW &SchedRead, // Expand an explicit list of processor resources into a full list of implied // resource groups and super resources that cover them. 
void SubtargetEmitter::ExpandProcResources( - RecVec &PRVec, std::vector<int64_t> &ReleaseAtCycles, + ConstRecVec &PRVec, std::vector<int64_t> &ReleaseAtCycles, std::vector<int64_t> &AcquireAtCycles, const CodeGenProcModel &PM) { assert(PRVec.size() == ReleaseAtCycles.size() && "failed precondition"); for (unsigned i = 0, e = PRVec.size(); i != e; ++i) { - Record *PRDef = PRVec[i]; - RecVec SubResources; + const Record *PRDef = PRVec[i]; + ConstRecVec SubResources; if (PRDef->isSubClassOf("ProcResGroup")) - SubResources = PRDef->getValueAsListOfDefs("Resources"); + SubResources = PRDef->getValueAsListOfConstDefs("Resources"); else { SubResources.push_back(PRDef); PRDef = SchedModels.findProcResUnits(PRDef, PM, PRDef->getLoc()); - for (Record *SubDef = PRDef; + for (const Record *SubDef = PRDef; SubDef->getValueInit("Super")->isComplete();) { if (SubDef->isSubClassOf("ProcResGroup")) { // Disallow this for simplicity. PrintFatalError(SubDef->getLoc(), "Processor resource group " "cannot be a super resource."); } - Record *SuperDef = SchedModels.findProcResUnits( + const Record *SuperDef = SchedModels.findProcResUnits( SubDef->getValueAsDef("Super"), PM, SubDef->getLoc()); PRVec.push_back(SuperDef); ReleaseAtCycles.push_back(ReleaseAtCycles[i]); @@ -1022,11 +1025,11 @@ void SubtargetEmitter::ExpandProcResources( SubDef = SuperDef; } } - for (Record *PR : PM.ProcResourceDefs) { + for (const Record *PR : PM.ProcResourceDefs) { if (PR == PRDef || !PR->isSubClassOf("ProcResGroup")) continue; RecVec SuperResources = PR->getValueAsListOfDefs("Resources"); - RecIter SubI = SubResources.begin(), SubE = SubResources.end(); + ConstRecIter SubI = SubResources.begin(), SubE = SubResources.end(); for (; SubI != SubE; ++SubI) { if (!is_contained(SuperResources, *SubI)) { break; } } @@ -1091,9 +1094,9 @@ void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel, if (!SC.InstRWs.empty()) { // This class has a default ReadWrite list which can be overridden by // InstRW definitions. - Record *RWDef = nullptr; - for (Record *RW : SC.InstRWs) { - Record *RWModelDef = RW->getValueAsDef("SchedModel"); + const Record *RWDef = nullptr; + for (const Record *RW : SC.InstRWs) { + const Record *RWModelDef = RW->getValueAsDef("SchedModel"); if (&ProcModel == &SchedModels.getProcModel(RWModelDef)) { RWDef = RW; break; } @@ -1108,7 +1111,7 @@ void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel, } if (Writes.empty()) { // Check this processor's itinerary class resources. - for (Record *I : ProcModel.ItinRWDefs) { + for (const Record *I : ProcModel.ItinRWDefs) { RecVec Matched = I->getValueAsListOfDefs("MatchedItinClasses"); if (is_contained(Matched, SC.ItinClassDef)) { SchedModels.findRWs(I->getValueAsListOfDefs("OperandReadWrites"), @@ -1144,8 +1147,7 @@ void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel, WLEntry.WriteResourceID = WriteID; for (unsigned WS : WriteSeq) { - - Record *WriteRes = + const Record *WriteRes = FindWriteResources(SchedModels.getSchedWrite(WS), ProcModel); // Mark the parent class as invalid for unsupported write types. @@ -1162,7 +1164,8 @@ void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel, SCDesc.RetireOOO |= WriteRes->getValueAsBit("RetireOOO"); // Create an entry for each ProcResource listed in WriteRes.
- RecVec PRVec = WriteRes->getValueAsListOfDefs("ProcResources"); + ConstRecVec PRVec = + WriteRes->getValueAsListOfConstDefs("ProcResources"); std::vector<int64_t> ReleaseAtCycles = WriteRes->getValueAsListOfInts("ReleaseAtCycles"); @@ -1261,7 +1264,7 @@ void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel, // Entries must be sorted first by UseIdx then by WriteResourceID. for (unsigned UseIdx = 0, EndIdx = Reads.size(); UseIdx != EndIdx; ++UseIdx) { - Record *ReadAdvance = + const Record *ReadAdvance = FindReadAdvance(SchedModels.getSchedRead(Reads[UseIdx]), ProcModel); if (!ReadAdvance) continue; @@ -1276,7 +1279,7 @@ void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel, if (ValidWrites.empty()) WriteIDs.push_back(0); else { - for (Record *VW : ValidWrites) { + for (const Record *VW : ValidWrites) { unsigned WriteID = SchedModels.getSchedRWIdx(VW, /*IsRead=*/false); assert(WriteID != 0 && "Expected a valid SchedRW in the list of ValidWrites"); @@ -1545,16 +1548,12 @@ void SubtargetEmitter::EmitSchedModel(raw_ostream &OS) { EmitProcessorModels(OS); } -static void emitPredicateProlog(RecordKeeper &Records, raw_ostream &OS) { +static void emitPredicateProlog(const RecordKeeper &Records, raw_ostream &OS) { std::string Buffer; raw_string_ostream Stream(Buffer); - // Collect all the PredicateProlog records and print them to the output - // stream. - std::vector<Record *> Prologs = - Records.getAllDerivedDefinitions("PredicateProlog"); - llvm::sort(Prologs, LessRecord()); - for (Record *P : Prologs) + // Print all PredicateProlog records to the output stream. + for (const Record *P : Records.getAllDerivedDefinitions("PredicateProlog")) Stream << P->getValueAsString("Code") << '\n'; OS << Buffer; @@ -1875,9 +1874,8 @@ void SubtargetEmitter::emitGetMacroFusions(const std::string &ClassName, // Produces a subtarget specific function for parsing // the subtarget features string. void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS) { - std::vector<Record *> Features = + ArrayRef<const Record *> Features = Records.getAllDerivedDefinitions("SubtargetFeature"); - llvm::sort(Features, LessRecord()); OS << "// ParseSubtargetFeatures - Parses features string setting specified\n" << "// subtarget options.\n" @@ -1901,7 +1899,7 @@ void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS) { OS << " InitMCProcessorInfo(CPU, TuneCPU, FS);\n" << " const FeatureBitset &Bits = getFeatureBits();\n"; - for (Record *R : Features) { + for (const Record *R : Features) { // Next record StringRef Instance = R->getName(); StringRef Value = R->getValueAsString("Value"); @@ -1992,28 +1990,20 @@ void SubtargetEmitter::run(raw_ostream &OS) { OS << "\n#ifdef GET_SUBTARGETINFO_ENUM\n"; OS << "#undef GET_SUBTARGETINFO_ENUM\n\n"; - DenseMap<Record *, unsigned> FeatureMap; - OS << "namespace llvm {\n"; - Enumeration(OS, FeatureMap); + auto FeatureMap = Enumeration(OS); OS << "} // end namespace llvm\n\n"; OS << "#endif // GET_SUBTARGETINFO_ENUM\n\n"; EmitSubtargetInfoMacroCalls(OS); OS << "namespace llvm {\n"; -#if 0 - OS << "namespace {\n"; -#endif unsigned NumFeatures = FeatureKeyValues(OS, FeatureMap); OS << "\n"; EmitSchedModel(OS); OS << "\n"; unsigned NumProcs = CPUKeyValues(OS, FeatureMap); OS << "\n"; -#if 0 - OS << "} // end anonymous namespace\n\n"; -#endif // MCInstrInfo initialization routine. 
emitGenMCSubtargetInfo(OS); diff --git a/llvm/utils/TableGen/X86ManualFoldTables.def b/llvm/utils/TableGen/X86ManualFoldTables.def index 2ebd928..4a58dea 100644 --- a/llvm/utils/TableGen/X86ManualFoldTables.def +++ b/llvm/utils/TableGen/X86ManualFoldTables.def @@ -31,18 +31,18 @@ NOFOLD(VCOMPRESSPSZrrk) NOFOLD(VCVTPS2PHZ128rrk) NOFOLD(VCVTPS2PHZ256rrk) NOFOLD(VCVTPS2PHZrrk) -NOFOLD(VEXTRACTF32x4Z256rrk) -NOFOLD(VEXTRACTF32x4Zrrk) -NOFOLD(VEXTRACTF32x8Zrrk) -NOFOLD(VEXTRACTF64x2Z256rrk) -NOFOLD(VEXTRACTF64x2Zrrk) -NOFOLD(VEXTRACTF64x4Zrrk) -NOFOLD(VEXTRACTI32x4Z256rrk) -NOFOLD(VEXTRACTI32x4Zrrk) -NOFOLD(VEXTRACTI32x8Zrrk) -NOFOLD(VEXTRACTI64x2Z256rrk) -NOFOLD(VEXTRACTI64x2Zrrk) -NOFOLD(VEXTRACTI64x4Zrrk) +NOFOLD(VEXTRACTF32x4Z256rrik) +NOFOLD(VEXTRACTF32x4Zrrik) +NOFOLD(VEXTRACTF32x8Zrrik) +NOFOLD(VEXTRACTF64x2Z256rrik) +NOFOLD(VEXTRACTF64x2Zrrik) +NOFOLD(VEXTRACTF64x4Zrrik) +NOFOLD(VEXTRACTI32x4Z256rrik) +NOFOLD(VEXTRACTI32x4Zrrik) +NOFOLD(VEXTRACTI32x8Zrrik) +NOFOLD(VEXTRACTI64x2Z256rrik) +NOFOLD(VEXTRACTI64x2Zrrik) +NOFOLD(VEXTRACTI64x4Zrrik) NOFOLD(VMOVAPDZ128mrk) NOFOLD(VMOVAPDZ256mrk) NOFOLD(VMOVAPDZmrk) @@ -215,18 +215,18 @@ NOFOLD(UD1Wr) // Exclude these two b/c they would conflict with {MMX_MOVD64from64rr, MMX_MOVQ64mr} in unfolding table NOFOLD(MMX_MOVQ64rr) NOFOLD(MMX_MOVQ64rr_REV) -// INSERTPSrm has no count_s while INSERTPSrr has count_s. +// INSERTPSrmi has no count_s while INSERTPSrri has count_s. // count_s is to indicate which element in dst vector is inserted. -// if count_s!=0, we can not fold INSERTPSrr into INSERTPSrm +// if count_s!=0, we can not fold INSERTPSrri into INSERTPSrmi // // the following folding can happen when count_s==0 // load xmm0, m32 -// insertpsrr xmm1, xmm0, imm +// INSERTPSrri xmm1, xmm0, imm // => -// insertpsrm xmm1, m32, imm -NOFOLD(INSERTPSrr) -NOFOLD(VINSERTPSZrr) -NOFOLD(VINSERTPSrr) +// INSERTPSrmi xmm1, m32, imm +NOFOLD(INSERTPSrri) +NOFOLD(VINSERTPSZrri) +NOFOLD(VINSERTPSrri) // Memory faults are suppressed for CFCMOV with memory operand. 
NOFOLD(CFCMOV16rr_REV) NOFOLD(CFCMOV32rr_REV) diff --git a/llvm/utils/TableGen/X86ManualInstrMapping.def b/llvm/utils/TableGen/X86ManualInstrMapping.def index f0154b80..d76c404 100644 --- a/llvm/utils/TableGen/X86ManualInstrMapping.def +++ b/llvm/utils/TableGen/X86ManualInstrMapping.def @@ -81,14 +81,14 @@ ENTRY(VMPSADBWZ128rmi, VMPSADBWrmi) ENTRY(VMPSADBWZ128rri, VMPSADBWrri) ENTRY(VMPSADBWZ256rmi, VMPSADBWYrmi) ENTRY(VMPSADBWZ256rri, VMPSADBWYrri) -ENTRY(VSHUFF32X4Z256rmi, VPERM2F128rm) -ENTRY(VSHUFF32X4Z256rri, VPERM2F128rr) -ENTRY(VSHUFF64X2Z256rmi, VPERM2F128rm) -ENTRY(VSHUFF64X2Z256rri, VPERM2F128rr) -ENTRY(VSHUFI32X4Z256rmi, VPERM2I128rm) -ENTRY(VSHUFI32X4Z256rri, VPERM2I128rr) -ENTRY(VSHUFI64X2Z256rmi, VPERM2I128rm) -ENTRY(VSHUFI64X2Z256rri, VPERM2I128rr) +ENTRY(VSHUFF32X4Z256rmi, VPERM2F128rmi) +ENTRY(VSHUFF32X4Z256rri, VPERM2F128rri) +ENTRY(VSHUFF64X2Z256rmi, VPERM2F128rmi) +ENTRY(VSHUFF64X2Z256rri, VPERM2F128rri) +ENTRY(VSHUFI32X4Z256rmi, VPERM2I128rmi) +ENTRY(VSHUFI32X4Z256rri, VPERM2I128rri) +ENTRY(VSHUFI64X2Z256rmi, VPERM2I128rmi) +ENTRY(VSHUFI64X2Z256rri, VPERM2I128rri) // W bit does not match ENTRY(VADDPDZ128rm, VADDPDrm) ENTRY(VADDPDZ128rr, VADDPDrr) @@ -245,14 +245,14 @@ ENTRY(VCVTTPD2DQZ256rm, VCVTTPD2DQYrm) ENTRY(VCVTTPD2DQZ256rr, VCVTTPD2DQYrr) ENTRY(VDIVPDZ256rm, VDIVPDYrm) ENTRY(VDIVPDZ256rr, VDIVPDYrr) -ENTRY(VEXTRACTF64x2Z256mr, VEXTRACTF128mr) -ENTRY(VEXTRACTF64x2Z256rr, VEXTRACTF128rr) -ENTRY(VEXTRACTI64x2Z256mr, VEXTRACTI128mr) -ENTRY(VEXTRACTI64x2Z256rr, VEXTRACTI128rr) -ENTRY(VINSERTF64x2Z256rm, VINSERTF128rm) -ENTRY(VINSERTF64x2Z256rr, VINSERTF128rr) -ENTRY(VINSERTI64x2Z256rm, VINSERTI128rm) -ENTRY(VINSERTI64x2Z256rr, VINSERTI128rr) +ENTRY(VEXTRACTF64x2Z256mr, VEXTRACTF128mri) +ENTRY(VEXTRACTF64x2Z256rr, VEXTRACTF128rri) +ENTRY(VEXTRACTI64x2Z256mr, VEXTRACTI128mri) +ENTRY(VEXTRACTI64x2Z256rr, VEXTRACTI128rri) +ENTRY(VINSERTF64x2Z256rm, VINSERTF128rmi) +ENTRY(VINSERTF64x2Z256rr, VINSERTF128rri) +ENTRY(VINSERTI64x2Z256rm, VINSERTI128rmi) +ENTRY(VINSERTI64x2Z256rr, VINSERTI128rri) ENTRY(VMAXCPDZ256rm, VMAXCPDYrm) ENTRY(VMAXCPDZ256rr, VMAXCPDYrr) ENTRY(VMAXPDZ256rm, VMAXPDYrm) diff --git a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn index a6d1204..91d547d 100644 --- a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn +++ b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn @@ -628,6 +628,7 @@ if (current_toolchain == default_toolchain) { "__memory/temporary_buffer.h", "__memory/uninitialized_algorithms.h", "__memory/unique_ptr.h", + "__memory/unique_temporary_buffer.h", "__memory/uses_allocator.h", "__memory/uses_allocator_construction.h", "__memory/voidify.h", diff --git a/llvm/utils/gn/secondary/llvm/lib/Transforms/Instrumentation/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Transforms/Instrumentation/BUILD.gn index 6c29ed7..523e82d 100644 --- a/llvm/utils/gn/secondary/llvm/lib/Transforms/Instrumentation/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/Transforms/Instrumentation/BUILD.gn @@ -21,7 +21,6 @@ static_library("Instrumentation") { "IndirectCallPromotion.cpp", "InstrOrderFile.cpp", "InstrProfiling.cpp", - "Instrumentation.cpp", "KCFI.cpp", "LowerAllowCheckPass.cpp", "MemProfiler.cpp", diff --git a/llvm/utils/gn/secondary/llvm/lib/Transforms/Utils/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Transforms/Utils/BUILD.gn index 55e25e0..c46b969 100644 --- a/llvm/utils/gn/secondary/llvm/lib/Transforms/Utils/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/Transforms/Utils/BUILD.gn @@ -43,6 +43,7 @@ 
static_library("Utils") { "InjectTLIMappings.cpp", "InlineFunction.cpp", "InstructionNamer.cpp", + "Instrumentation.cpp", "IntegerDivision.cpp", "LCSSA.cpp", "LibCallsShrinkWrap.cpp", diff --git a/mlir/docs/Dialects/OpenMPDialect/ODS.md b/mlir/docs/Dialects/OpenMPDialect/ODS.md new file mode 100644 index 0000000..51c61c8 --- /dev/null +++ b/mlir/docs/Dialects/OpenMPDialect/ODS.md @@ -0,0 +1,3 @@ +# ODS Documentation + +[include "Dialects/OpenMPDialect.md"] diff --git a/mlir/docs/Dialects/OpenMPDialect/_index.md b/mlir/docs/Dialects/OpenMPDialect/_index.md new file mode 100644 index 0000000..7a3837a --- /dev/null +++ b/mlir/docs/Dialects/OpenMPDialect/_index.md @@ -0,0 +1,409 @@ +# 'omp' Dialect + +The `omp` dialect is for representing directives, clauses and other definitions +of the [OpenMP programming model](https://www.openmp.org). This directive-based +programming model, defined for the C, C++ and Fortran programming languages, +provides abstractions to simplify the development of parallel and accelerated +programs. All versions of the OpenMP specification can be found +[here](https://www.openmp.org/specifications/). + +Operations in this MLIR dialect generally correspond to a single OpenMP +directive, taking arguments that represent their supported clauses, though this +is not always the case. For a detailed information of operations, types and +other definitions in this dialect, refer to the automatically-generated +[ODS Documentation](ODS.md). + +[TOC] + +## Operation Naming Conventions + +This section aims to standardize how dialect operation names are chosen, to +ensure a level of consistency. There are two categories of names: tablegen names +and assembly names. The former also corresponds to the C++ class that is +generated for the operation, whereas the latter is used to represent it in MLIR +text form. + +Tablegen names are CamelCase, with the first letter capitalized and an "Op" +suffix, whereas assembly names are snake_case, with all lowercase letters and +words separated by underscores. + +If the operation corresponds to a directive, clause or other kind of definition +in the OpenMP specification, it must use the same name split into words in the +same way. For example, the `target data` directive would become `TargetDataOp` / +`omp.target_data`, whereas `taskloop` would become `TaskloopOp` / +`omp.taskloop`. + +Operations intended to carry extra information for another particular operation +or clause must be named after that other operation or clause, followed by the +name of the additional information. The assembly name must use a period to +separate both parts. For example, the operation used to define some extra +mapping information is named `MapInfoOp` / `omp.map.info`. The same rules are +followed if multiple operations are created for different variants of the same +directive, e.g. `atomic` becomes `Atomic{Read,Write,Update,Capture}Op` / +`omp.atomic.{read,write,update,capture}`. + +## Clause-Based Operation Definition + +One main feature of the OpenMP specification is that, even though the set of +clauses that could be applied to a given directive is independent from other +directives, these clauses can generally apply to multiple directives. Since +clauses usually define which arguments the corresponding MLIR operation takes, +it is possible (and preferred) to define OpenMP dialect operations based on the +list of clauses taken by the corresponding directive. 
This makes it simpler to +keep their representation consistent across operations and minimizes redundancy +in the dialect. + +To achieve this, the base `OpenMP_Clause` tablegen class has been created. It is +intended to be used to create clause definitions that can then be attached to +multiple `OpenMP_Op` definitions, resulting in the latter inheriting by default +all properties defined by the attached clauses, similarly to the trait mechanism. +This mechanism is implemented in +[OpenMPOpBase.td](https://github.com/llvm/llvm-project/blob/main/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td). + +### Adding a Clause + +OpenMP clause definitions are located in +[OpenMPClauses.td](https://github.com/llvm/llvm-project/blob/main/mlir/include/mlir/Dialect/OpenMP/OpenMPClauses.td). +For each clause, an `OpenMP_Clause` subclass and a definition based on it must +be created. The subclass must take a `bit` template argument for each of the +properties it can populate on associated `OpenMP_Op`s. These must be forwarded +to the base class. The definition must be an instantiation of the base class +where all these template arguments are set to `false`. The definition's name +must be `OpenMP_<Name>Clause`, whereas its base class' must be +`OpenMP_<Name>ClauseSkip`. Following this pattern makes it possible to +optionally skip the inheritance of some properties when defining operations: +[more info](#overriding-clause-inherited-properties). + +Clauses can define the following properties: + - `list<Traits> traits`: To be used when a certain clause always +implies some op trait, like the `map` clause and the `MapClauseOwningInterface`. + - `dag(ins) arguments`: Mandatory property holding values and attributes +used to represent the clause. Argument names use snake_case and should contain +the clause name to avoid name clashes between clauses. Variadic arguments +(non-attributes) must contain the "_vars" suffix. + - `string {req,opt}AssemblyFormat`: Optional formatting strings to produce +custom human-friendly printers and parsers for arguments associated with the +clause. They will be combined with the assembly formats of other clauses as explained +[below](#adding-an-operation). + - `string description`: Optional description text to describe the clause and +its representation. + - `string extraClassDeclaration`: Optional C++ declarations to be added to +operation classes including the clause. + +For example: + +```tablegen +class OpenMP_ExampleClauseSkip< + bit traits = false, bit arguments = false, bit assemblyFormat = false, + bit description = false, bit extraClassDeclaration = false + > : OpenMP_Clause<traits, arguments, assemblyFormat, description, + extraClassDeclaration> { + let arguments = (ins + Optional<AnyType>:$example_var + ); + + let optAssemblyFormat = [{ + `example` `(` $example_var `:` type($example_var) `)` + }]; + + let description = [{ + The `example_var` argument defines the variable to which the EXAMPLE clause + applies. + }]; +} + +def OpenMP_ExampleClause : OpenMP_ExampleClauseSkip<>; +``` + +### Adding an Operation + +Operations in the OpenMP dialect, located in +[OpenMPOps.td](https://github.com/llvm/llvm-project/blob/main/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td), +can be defined like any other regular operation by just specifying a `mnemonic` +and optional list of `traits` when inheriting from `OpenMP_Op`, and then +defining the expected `description`, `arguments`, etc. properties inside its +body.
However, in most cases, basing the operation definition on its list of +accepted clauses is significantly simpler because some of the properties can +just be inherited from these clauses. + +In general, the way to achieve this is to specify, in addition to the `mnemonic` +and optional list of `traits`, a list of `clauses` where all the applicable +`OpenMP_<Name>Clause` definitions are added. Then, the only properties that +would have to be defined in the operation's body are the `summary` and +`description`. For the latter, only the operation itself has to be +described, since the description of its clause-inherited arguments is appended +through the inherited `clausesDescription` property. + +If the operation is intended to have a single region, this is better achieved by +setting the `singleRegion=true` template argument of `OpenMP_Op` rather than manually +populating the `regions` property of the operation, because that way the default +`assemblyFormat` is also updated correspondingly. + +For example: + +```tablegen +def ExampleOp : OpenMP_Op<"example", traits = [ + AttrSizedOperandSegments, ... + ], clauses = [ + OpenMP_AlignedClause, OpenMP_IfClause, OpenMP_LinearClause, ... + ], singleRegion = true> { + let summary = "example construct"; + let description = [{ + The example construct represents... + }] # clausesDescription; +} +``` + +This is possible because the `arguments`, `assemblyFormat` and +`extraClassDeclaration` properties of the operation are by default +populated by concatenating the corresponding properties of the clauses on the +list. In the case of the `assemblyFormat`, this involves combining the +`reqAssemblyFormat` and the `optAssemblyFormat` properties. The +`reqAssemblyFormat` of all clauses is concatenated first and separated using +spaces, whereas the `optAssemblyFormat` is wrapped in an `oilist()` and +interleaved with "|" instead of spaces. The resulting `assemblyFormat` contains +the required assembly format strings, followed by the optional assembly format +strings, optionally the `$region` and the `attr-dict`. + +### Overriding Clause-Inherited Properties + +Although the clause-based definition of operations can greatly reduce work, it's +also somewhat restrictive, since there may be some situations where only part of +the operation definition can be automated in that manner. For fine-grained +control over properties inherited from each clause, two features are available: + + - Inhibition of properties. By using `OpenMP_<Name>ClauseSkip` tablegen +classes, the list of properties copied from the clause to the operation can be +selected. For example, `OpenMP_IfClauseSkip<assemblyFormat = true>` would result +in every property defined for the `OpenMP_IfClause` except for the +`assemblyFormat` being used to initially populate the properties of the +operation. + - Augmentation of properties. There are times when there is a need to add to +a clause-populated operation property. Instead of overriding the property in the +definition of the operation and having to manually replicate what would +otherwise be automatically populated before adding to it, some internal +properties are defined to hold this default value: `clausesArgs`, +`clausesAssemblyFormat`, `clauses{Req,Opt}AssemblyFormat` and +`clausesExtraClassDeclaration`.
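As an illustration of the first of these features, here is a minimal sketch of property inhibition (the `omp.simple` operation is hypothetical, and the `if` clause is assumed to expose an `$if_expr` argument):

```tablegen
// Hypothetical operation: every property of the `if` clause is inherited
// except its assembly format, which is skipped and re-specified by hand.
def SimpleOp : OpenMP_Op<"simple", clauses = [
    OpenMP_IfClauseSkip<assemblyFormat = true>
  ], singleRegion = true> {
  let summary = "simple construct";
  let description = [{
    This operation represents...
  }] # clausesDescription;

  // Hand-written format standing in for the skipped clause-provided one.
  let assemblyFormat = "oilist(`if` `(` $if_expr `)`) $region attr-dict";
}
```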
+ +In the following example, assuming both the `OpenMP_InReductionClause` and the +`OpenMP_ReductionClause` define a `getReductionVars` extra class declaration, +we skip the conflicting `extraClassDeclaration`s inherited by both clauses and +provide another implementation, without having to also re-define other +declarations inherited from the `OpenMP_AllocateClause`: + +```tablegen +def ExampleOp : OpenMP_Op<"example", traits = [ + AttrSizedOperandSegments, ... + ], clauses = [ + OpenMP_AllocateClause, + OpenMP_InReductionClauseSkip<extraClassDeclaration = true>, + OpenMP_ReductionClauseSkip<extraClassDeclaration = true> + ], singleRegion = true> { + let summary = "example construct"; + let description = [{ + This operation represents... + }] # clausesDescription; + + // Override the clause-populated extraClassDeclaration and add the default + // back via appending clausesExtraClassDeclaration to it. This has the effect + // of adding one declaration. Since this property is skipped for the + // InReduction and Reduction clauses, clausesExtraClassDeclaration won't + // incorporate the definition of this property for these clauses. + let extraClassDeclaration = [{ + SmallVector<Value> getReductionVars() { + // Concatenate inReductionVars and reductionVars and return the result... + } + }] # clausesExtraClassDeclaration; +} +``` + +These features are intended for complex edge cases, but an effort should be made +to avoid having to use them, since they may introduce inconsistencies and +complexity to the dialect. + +### Tablegen Verification Pass + +As a result of the implicit way in which fundamental properties of MLIR +operations are populated following this approach, and the ability to override +them, forgetting to append clause-inherited values might result in hard to debug +tablegen errors. + +For this reason, the `-verify-openmp-ops` tablegen pseudo-backend was created. +It runs before any other tablegen backends are triggered for the +[OpenMPOps.td](https://github.com/llvm/llvm-project/blob/main/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td) +file and warns any time a property defined for a clause is not found in the +corresponding operation, except if it is explicitly skipped as described +[above](#overriding-clause-inherited-properties). This way, in case of a later +tablegen failure while processing OpenMP dialect operations, earlier messages +triggered by that pass can point to a likely solution. + +## Loop-Associated Directives + +Loop-associated OpenMP constructs are represented in the dialect as loop wrapper +operations. These implement the `LoopWrapperInterface`, which enforces a series +of restrictions upon the operation: + - It contains a single region with a single block; and + - Its block contains exactly two operations: another loop wrapper or +`omp.loop_nest` operation and a terminator. + +This approach splits the representation for a loop nest and the loop-associated +constructs that specify how its iterations are executed, possibly across various +SIMD lanes (`omp.simd`), threads (`omp.wsloop`), teams of threads +(`omp.distribute`) or tasks (`omp.taskloop`). The ability to directly nest +multiple loop wrappers to impact the execution of a single loop nest is used to +represent composite constructs in a modular way. + +The `omp.loop_nest` operation represents a collapsed rectangular loop nest that +must always be wrapped by at least one loop wrapper, which defines how it is +intended to be executed. 
It serves as a simpler and more restrictive +representation of OpenMP loops while a more general approach to support +non-rectangular loop nests, loop transformations and non-perfectly nested loops, +based on a new `omp.canonical_loop` definition, is developed. + +The following example shows how a `parallel {do,for}` construct would be +represented: +```mlir +omp.parallel ... { + ... + omp.wsloop ... { + omp.loop_nest (%i) : index = (%lb) to (%ub) step (%step) { + %0 = memref.load %a[%i] : memref<?xf32> + %1 = memref.load %b[%i] : memref<?xf32> + %sum = arith.addf %0, %1 : f32 + memref.store %sum, %c[%i] : memref<?xf32> + omp.yield + } + omp.terminator + } + ... + omp.terminator +} +``` + +### Loop Transformations + +In addition to the worksharing loop-associated constructs described above, the +OpenMP specification also defines a set of loop transformation constructs. They +replace the associated loop(s) before worksharing constructs are executed on the +generated loop(s). Some examples of such constructs are `tile` and `unroll`. + +A general approach for representing these types of OpenMP constructs has not yet +been implemented, but it is closely linked to the `omp.canonical_loop` work. +Nevertheless, the loop transformation defined by the `collapse` clause for +loop-associated worksharing constructs can be represented by introducing multiple +bounds, steps and induction variables to the `omp.loop_nest` operation. + +## Compound Construct Representation + +The OpenMP specification defines certain shortcuts that allow specifying +multiple constructs in a single directive, which are referred to as compound +constructs (e.g. `parallel do` contains the `parallel` and `do` constructs). +These can be further classified into [combined](#combined-constructs) and +[composite](#composite-constructs) constructs. This section describes how they +are represented in the dialect. + +When clauses are specified for compound constructs, the OpenMP specification +defines a set of rules to decide to which leaf constructs they apply, as well as +potentially introducing some other implicit clauses. These rules must be taken +into account by those creating the MLIR representation, since it is a per-leaf +representation that expects these rules to have already been followed. + +### Combined Constructs + +Combined constructs are semantically equivalent to specifying one construct +immediately nested inside another. This property is used to simplify the dialect +by representing them through the operations associated with each leaf construct. +For example, `target teams` would be represented as follows: + +```mlir +omp.target ... { + ... + omp.teams ... { + ... + omp.terminator + } + ... + omp.terminator +} +``` + +### Composite Constructs + +Composite constructs are similar to combined constructs in that they specify the +effect of one construct being applied immediately after another. However, they +group together constructs that cannot be directly nested into each other. +Specifically, they group together multiple loop-associated constructs that apply +to the same collapsed loop nest. + +As of version 5.2 of the OpenMP specification, the list of composite constructs +is the following: + - `{do,for} simd`; + - `distribute simd`; + - `distribute parallel {do,for}`; + - `distribute parallel {do,for} simd`; and + - `taskloop simd`.
+ +Even though the list of composite constructs is relatively short and it would +also be possible to create dialect operations for each, it was decided to +allow attaching multiple loop wrappers to a single loop instead. This minimizes +redundancy in the dialect and maximizes its modularity, since there is a single +operation for each leaf construct regardless of whether it can be part of a +composite construct. On the other hand, this means the `omp.loop_nest` operation +will have to be interpreted differently depending on how many and which loop +wrappers are attached to it. + +To simplify the detection of operations taking part in the representation of a +composite construct, the `ComposableOpInterface` was introduced. Its purpose is +to handle the `omp.composite` discardable dialect attribute that can optionally +be attached to these operations. Operation verifiers will ensure its presence is +consistent with the context the operation appears in, so that the attribute is +present if and only if the operation represents a leaf of a composite +construct. + +For example, the `distribute simd` composite construct is represented as +follows: + +```mlir +omp.distribute ... { + omp.simd ... { + omp.loop_nest (%i) : index = (%lb) to (%ub) step (%step) { + ... + omp.yield + } + omp.terminator + } {omp.composite} + omp.terminator +} {omp.composite} +``` + +One exception to this is the representation of the +`distribute parallel {do,for}` composite construct. The presence of a +block-associated `parallel` leaf construct would introduce many problems if it +were allowed to work as a loop wrapper. In this case, the "hoisted `omp.parallel` +representation" is used instead. This consists of making `omp.parallel` the +parent operation, with a nested `omp.loop_nest` wrapped by `omp.distribute` and +`omp.wsloop` (and `omp.simd`, in the `distribute parallel {do,for} simd` case). + +This approach works because `parallel` is a parallelism-generating construct, +whereas `distribute` is a worksharing construct impacting the higher-level +`teams` construct, so the ordering between these constructs does not cause +semantic mismatches. This property is also exploited by LLVM's SPMD mode. + +```mlir +omp.parallel ... { + ... + omp.distribute ... { + omp.wsloop ... { + omp.loop_nest (%i) : index = (%lb) to (%ub) step (%step) { + ... + omp.yield + } + omp.terminator + } {omp.composite} + omp.terminator + } {omp.composite} + ... + omp.terminator +} {omp.composite} +``` diff --git a/mlir/lib/AsmParser/Parser.cpp b/mlir/lib/AsmParser/Parser.cpp index 2e4c4a3..83eec32 100644 --- a/mlir/lib/AsmParser/Parser.cpp +++ b/mlir/lib/AsmParser/Parser.cpp @@ -2412,14 +2412,15 @@ ParseResult OperationParser::parseOptionalBlockArgList(Block *owner) { //===----------------------------------------------------------------------===// ParseResult OperationParser::codeCompleteSSAUse() { - std::string detailData; - llvm::raw_string_ostream detailOS(detailData); for (IsolatedSSANameScope &scope : isolatedNameScopes) { for (auto &it : scope.values) { if (it.second.empty()) continue; Value frontValue = it.second.front().value; + std::string detailData; + llvm::raw_string_ostream detailOS(detailData); + // If the value isn't a forward reference, we also add the name of the op // to the detail.
if (auto result = dyn_cast<OpResult>(frontValue)) { @@ -2440,7 +2441,7 @@ ParseResult OperationParser::codeCompleteSSAUse() { detailOS << ", ..."; state.codeCompleteContext->appendSSAValueCompletion( - it.getKey(), std::move(detailOS.str())); + it.getKey(), std::move(detailData)); } } diff --git a/mlir/lib/Bindings/Python/IRAttributes.cpp b/mlir/lib/Bindings/Python/IRAttributes.cpp index bfdd4a5..ead81a7 100644 --- a/mlir/lib/Bindings/Python/IRAttributes.cpp +++ b/mlir/lib/Bindings/Python/IRAttributes.cpp @@ -708,7 +708,7 @@ public: llvm::raw_string_ostream os(message); os << "Expected a static ShapedType for the shaped_type parameter: " << py::repr(py::cast(*explicitType)); - throw py::value_error(os.str()); + throw py::value_error(message); } shapedType = *explicitType; } else { @@ -732,7 +732,7 @@ public: os << "All attributes must be of the same type and match " << "the type parameter: expected=" << py::repr(py::cast(shapedType)) << ", but got=" << py::repr(py::cast(attrType)); - throw py::value_error(os.str()); + throw py::value_error(message); } } diff --git a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp index b4da504..877bd22 100644 --- a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp +++ b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp @@ -1790,9 +1790,8 @@ bool hasDuplicateDeviceTypes( return false; for (auto attr : *segments) { auto deviceTypeAttr = mlir::dyn_cast<mlir::acc::DeviceTypeAttr>(attr); - if (deviceTypes.contains(deviceTypeAttr.getValue())) + if (!deviceTypes.insert(deviceTypeAttr.getValue()).second) return true; - deviceTypes.insert(deviceTypeAttr.getValue()); } return false; } @@ -1807,9 +1806,8 @@ LogicalResult checkDeviceTypes(mlir::ArrayAttr deviceTypes) { mlir::dyn_cast_or_null<mlir::acc::DeviceTypeAttr>(attr); if (!deviceTypeAttr) return failure(); - if (crtDeviceTypes.contains(deviceTypeAttr.getValue())) + if (!crtDeviceTypes.insert(deviceTypeAttr.getValue()).second) return failure(); - crtDeviceTypes.insert(deviceTypeAttr.getValue()); } return success(); } diff --git a/mlir/lib/Dialect/SCF/IR/SCF.cpp b/mlir/lib/Dialect/SCF/IR/SCF.cpp index bfa7db8..6d47ff3 100644 --- a/mlir/lib/Dialect/SCF/IR/SCF.cpp +++ b/mlir/lib/Dialect/SCF/IR/SCF.cpp @@ -4084,10 +4084,8 @@ static std::optional<SmallVector<unsigned>> getArgsMapping(ValueRange args1, static bool hasDuplicates(ValueRange args) { llvm::SmallDenseSet<Value> set; for (Value arg : args) { - if (set.contains(arg)) + if (!set.insert(arg).second) return true; - - set.insert(arg); } return false; } diff --git a/mlir/lib/TableGen/CodeGenHelpers.cpp b/mlir/lib/TableGen/CodeGenHelpers.cpp index 7e76403..2f13887 100644 --- a/mlir/lib/TableGen/CodeGenHelpers.cpp +++ b/mlir/lib/TableGen/CodeGenHelpers.cpp @@ -315,5 +315,5 @@ std::string mlir::tblgen::escapeString(StringRef value) { std::string ret; llvm::raw_string_ostream os(ret); os.write_escaped(value); - return os.str(); + return ret; } diff --git a/mlir/lib/TableGen/Predicate.cpp b/mlir/lib/TableGen/Predicate.cpp index 8beb772..3c3c475 100644 --- a/mlir/lib/TableGen/Predicate.cpp +++ b/mlir/lib/TableGen/Predicate.cpp @@ -301,7 +301,7 @@ static std::string combineBinary(ArrayRef<std::string> children, for (unsigned i = 1; i < size; ++i) { os << ' ' << combiner << " (" << children[i] << ')'; } - return os.str(); + return str; } // Prepend negation to the only condition in the predicate expression list. 
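The OpenACC and SCF hunks above both collapse a `contains()` check followed by an `insert()` into a single `insert()` call. A minimal self-contained sketch of the idiom (the function and element type here are illustrative, not taken from the patch):

```cpp
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"

// DenseSet-style insert() returns an (iterator, inserted) pair; `second` is
// false when the element was already present, so a single call both queries
// and updates the set.
static bool hasDuplicates(llvm::ArrayRef<int> values) {
  llvm::SmallDenseSet<int> seen;
  for (int v : values)
    if (!seen.insert(v).second) // already seen: duplicate found
      return true;
  return false;
}
```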
diff --git a/mlir/lib/Target/LLVM/ModuleToObject.cpp b/mlir/lib/Target/LLVM/ModuleToObject.cpp index d94c10d..7739134 100644 --- a/mlir/lib/Target/LLVM/ModuleToObject.cpp +++ b/mlir/lib/Target/LLVM/ModuleToObject.cpp @@ -182,7 +182,7 @@ ModuleToObject::translateToISA(llvm::Module &llvmModule, codegenPasses.run(llvmModule); } - return stream.str(); + return targetISA; } void ModuleToObject::setDataLayoutAndTriple(llvm::Module &module) { diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp index 7e2da1e..0784c3c 100644 --- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp +++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp @@ -50,7 +50,7 @@ void registerFromLLVMIRTranslation() { std::string errStr; llvm::raw_string_ostream errStream(errStr); err.print(/*ProgName=*/"", errStream); - emitError(UnknownLoc::get(context)) << errStream.str(); + emitError(UnknownLoc::get(context)) << errStr; return {}; } if (llvm::verifyModule(*llvmModule, &llvm::errs())) diff --git a/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp index bdb15a2..d948ff5 100644 --- a/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp @@ -62,7 +62,7 @@ static std::string diagStr(const llvm::Type *type) { std::string str; llvm::raw_string_ostream os(str); type->print(os); - return os.str(); + return str; } /// Get the declaration of an overloaded llvm intrinsic. First we get the diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMPCommon.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMPCommon.cpp index 2145b95..1595bd2 100644 --- a/mlir/lib/Target/LLVMIR/Dialect/OpenMPCommon.cpp +++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMPCommon.cpp @@ -25,7 +25,7 @@ mlir::LLVM::createSourceLocStrFromLocation(Location loc, std::string locStr; llvm::raw_string_ostream locOS(locStr); locOS << loc; - return builder.getOrCreateSrcLocStr(locOS.str(), strLen); + return builder.getOrCreateSrcLocStr(locStr, strLen); } llvm::Constant * diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp index d1732cb..21f2050 100644 --- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp +++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp @@ -55,7 +55,7 @@ static std::string diag(const llvm::Value &value) { std::string str; llvm::raw_string_ostream os(str); os << value; - return os.str(); + return str; } // Utility to print an LLVM metadata node as a string for passing @@ -66,7 +66,7 @@ static std::string diagMD(const llvm::Metadata *node, std::string str; llvm::raw_string_ostream os(str); node->print(os, module, /*IsForDebug=*/true); - return os.str(); + return str; } /// Returns the name of the global_ctors global variables. 
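Many of the MLIR changes in this commit, including the hunks above, drop now-redundant `os.str()` calls: `llvm::raw_string_ostream` is unbuffered and writes straight through to its backing string, so the string itself can be used once printing is done. A minimal sketch of the pattern (illustrative names, not from the patch):

```cpp
#include "llvm/Support/raw_ostream.h"
#include <string>

// raw_string_ostream writes directly into `buf` with no internal buffering,
// so returning `buf` is equivalent to the old `return os.str();` and no
// flush() call is needed.
static std::string formatPair(int a, int b) {
  std::string buf;
  llvm::raw_string_ostream os(buf);
  os << '(' << a << ", " << b << ')';
  return buf;
}
```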
diff --git a/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp b/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp index 6c28c04..4c15523 100644 --- a/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp +++ b/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp @@ -137,7 +137,7 @@ Serializer::processSpecConstantOperationOp(spirv::SpecConstantOperationOp op) { std::string enclosedOpName; llvm::raw_string_ostream rss(enclosedOpName); rss << "Op" << enclosedOp.getName().stripDialect(); - auto enclosedOpcode = spirv::symbolizeOpcode(rss.str()); + auto enclosedOpcode = spirv::symbolizeOpcode(enclosedOpName); if (!enclosedOpcode) { op.emitError("Couldn't find op code for op ") diff --git a/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp b/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp index b0feda05..7719eb6 100644 --- a/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp +++ b/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp @@ -915,7 +915,7 @@ uint32_t Serializer::prepareConstantInt(Location loc, IntegerAttr intAttr, value.print(rss, /*isSigned=*/false); emitError(loc, "cannot serialize ") - << bitwidth << "-bit integer literal: " << rss.str(); + << bitwidth << "-bit integer literal: " << valueStr; return 0; } } @@ -968,7 +968,7 @@ uint32_t Serializer::prepareConstantFp(Location loc, FloatAttr floatAttr, value.print(rss); emitError(loc, "cannot serialize ") - << floatAttr.getType() << "-typed float literal: " << rss.str(); + << floatAttr.getType() << "-typed float literal: " << valueStr; return 0; } diff --git a/mlir/lib/Tools/PDLL/Parser/Parser.cpp b/mlir/lib/Tools/PDLL/Parser/Parser.cpp index 2f842df..575fb4a 100644 --- a/mlir/lib/Tools/PDLL/Parser/Parser.cpp +++ b/mlir/lib/Tools/PDLL/Parser/Parser.cpp @@ -148,9 +148,8 @@ private: std::string docStr; { llvm::raw_string_ostream docOS(docStr); - std::string tmpDocStr = doc.str(); raw_indented_ostream(docOS).printReindented( - StringRef(tmpDocStr).rtrim(" \t")); + StringRef(docStr).rtrim(" \t")); } return docStr; } diff --git a/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp b/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp index 0b88d31..5a7b176 100644 --- a/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp +++ b/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp @@ -312,8 +312,7 @@ static LogicalResult doVerifyRoundTrip(Operation *op, FallbackAsmResourceMap fallbackResourceMap; ParserConfig parseConfig(&roundtripContext, /*verifyAfterParse=*/true, &fallbackResourceMap); - roundtripModule = - parseSourceString<Operation *>(ostream.str(), parseConfig); + roundtripModule = parseSourceString<Operation *>(buffer, parseConfig); if (!roundtripModule) { op->emitOpError() << "failed to parse " << testType << " content back, cannot verify round-trip.\n"; diff --git a/mlir/lib/Transforms/RemoveDeadValues.cpp b/mlir/lib/Transforms/RemoveDeadValues.cpp index 0552569..1cc0096 100644 --- a/mlir/lib/Transforms/RemoveDeadValues.cpp +++ b/mlir/lib/Transforms/RemoveDeadValues.cpp @@ -191,10 +191,10 @@ static void cleanSimpleOp(Operation *op, RunLivenessAnalysis &la) { /// non-live across all callers), /// (5) Dropping the uses of these return values from its callers, AND /// (6) Erasing these return values -/// iff it is not public. +/// iff it is not public or declaration. static void cleanFuncOp(FunctionOpInterface funcOp, Operation *module, RunLivenessAnalysis &la) { - if (funcOp.isPublic()) + if (funcOp.isPublic() || funcOp.isDeclaration()) return; // Get the list of unnecessary (non-live) arguments in `nonLiveArgs`. 
diff --git a/mlir/lib/Transforms/ViewOpGraph.cpp b/mlir/lib/Transforms/ViewOpGraph.cpp index 82e9863..fa0af766 100644 --- a/mlir/lib/Transforms/ViewOpGraph.cpp +++ b/mlir/lib/Transforms/ViewOpGraph.cpp @@ -46,7 +46,7 @@ static std::string strFromOs(function_ref<void(raw_ostream &)> func) { std::string buf; llvm::raw_string_ostream os(buf); func(os); - return os.str(); + return buf; } /// Escape special characters such as '\n' and quotation marks. @@ -199,7 +199,7 @@ private: std::string buf; llvm::raw_string_ostream ss(buf); attr.print(ss); - os << truncateString(ss.str()); + os << truncateString(buf); } /// Append an edge to the list of edges. @@ -262,7 +262,7 @@ private: std::string buf; llvm::raw_string_ostream ss(buf); interleaveComma(op->getResultTypes(), ss); - os << truncateString(ss.str()) << ")"; + os << truncateString(buf) << ")"; } // Print attributes. diff --git a/mlir/test/Transforms/remove-dead-values.mlir b/mlir/test/Transforms/remove-dead-values.mlir index 69426fd..a31cb97c 100644 --- a/mlir/test/Transforms/remove-dead-values.mlir +++ b/mlir/test/Transforms/remove-dead-values.mlir @@ -357,3 +357,8 @@ func.func @kernel(%arg0: memref<18xf32>) { // CHECK: gpu.launch blocks // CHECK: memref.store // CHECK-NEXT: gpu.terminator + +// ----- + +// CHECK: func.func private @no_block_func_declaration() +func.func private @no_block_func_declaration() -> () diff --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp index 7d42c03..aa5a52a 100644 --- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp +++ b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp @@ -378,7 +378,6 @@ static std::string generateCppExpression(SerializedAffineMap self, std::string printedStr; llvm::raw_string_ostream printedSs(printedStr); self.affineMapAttr.print(printedSs); - printedSs.flush(); static const char exprFormat[] = R"FMT(llvm::cast<AffineMapAttr>(mlir::parseAttribute("{0}", {1})).getValue())FMT"; @@ -391,7 +390,6 @@ static std::string interleaveToString(Container &container, std::string result; llvm::raw_string_ostream ss(result); llvm::interleave(container, ss, separator); - ss.flush(); return result; } @@ -827,7 +825,6 @@ generateNamedGenericOpDefns(LinalgOpConfig &opConfig, break; } }); - ss.flush(); os << llvm::formatv(structuredOpIteratorTypesFormat, className, iteratorsStr); } else { @@ -892,7 +889,6 @@ exprs.push_back(getAffineConstantExpr(cst{1}, context)); std::string symbolBindingsStr; llvm::raw_string_ostream symbolBindingsSs(symbolBindingsStr); llvm::interleave(symbolBindings, symbolBindingsSs, "\n"); - symbolBindingsSs.flush(); os << llvm::formatv(structuredOpSymbolBindingsFormat, className, symbolBindingsStr); @@ -913,7 +909,6 @@ exprs.push_back(getAffineConstantExpr(cst{1}, context)); llvm::raw_string_ostream dimIdentsSs(dimIdentsStr); llvm::interleaveComma(dimIndices, dimIdentsSs, [&](unsigned i) { dimIdentsSs << "d" << i; }); - dimIdentsSs.flush(); // Statements to add and simplify each affine map. SmallVector<std::string> stmts; diff --git a/mlir/tools/mlir-pdll/mlir-pdll.cpp b/mlir/tools/mlir-pdll/mlir-pdll.cpp index c6ad6c3..0fcf8d1 100644 --- a/mlir/tools/mlir-pdll/mlir-pdll.cpp +++ b/mlir/tools/mlir-pdll/mlir-pdll.cpp @@ -207,7 +207,7 @@ int main(int argc, char **argv) { // any. 
if (auto existingOrErr = llvm::MemoryBuffer::getFile(outputFilename, /*IsText=*/true)) - if (std::move(existingOrErr.get())->getBuffer() == outputStrOS.str()) + if (std::move(existingOrErr.get())->getBuffer() == outputStr) shouldWriteOutput = false; } @@ -219,7 +219,7 @@ int main(int argc, char **argv) { llvm::errs() << errorMessage << "\n"; return 1; } - outputFile->os() << outputStrOS.str(); + outputFile->os() << outputStr; outputFile->keep(); } diff --git a/mlir/tools/mlir-src-sharder/mlir-src-sharder.cpp b/mlir/tools/mlir-src-sharder/mlir-src-sharder.cpp index 2f92ff2..9ec9b72 100644 --- a/mlir/tools/mlir-src-sharder/mlir-src-sharder.cpp +++ b/mlir/tools/mlir-src-sharder/mlir-src-sharder.cpp @@ -98,7 +98,7 @@ int main(int argc, char **argv) { // any. if (auto existingOrErr = llvm::MemoryBuffer::getFile(outputFilename, /*IsText=*/true)) - if (std::move(existingOrErr.get())->getBuffer() == os.str()) + if (std::move(existingOrErr.get())->getBuffer() == outputStr) shouldWriteOutput = false; } diff --git a/mlir/tools/mlir-tblgen/OpDocGen.cpp b/mlir/tools/mlir-tblgen/OpDocGen.cpp index d60eda0..bf75957 100644 --- a/mlir/tools/mlir-tblgen/OpDocGen.cpp +++ b/mlir/tools/mlir-tblgen/OpDocGen.cpp @@ -162,7 +162,7 @@ static void emitOpTraitsDoc(const Operator &op, raw_ostream &os) { os << effect << " on " << rec->getValueAsString("resource"); }); os << "}"; - effects.insert(backticks(os.str())); + effects.insert(backticks(effectStr)); name.append(llvm::formatv(" ({0})", traitName).str()); } interfaces.insert(backticks(name)); @@ -433,7 +433,7 @@ static void maybeNest(bool nest, llvm::function_ref<void(raw_ostream &os)> fn, std::string str; llvm::raw_string_ostream ss(str); fn(ss); - for (StringRef x : llvm::split(ss.str(), "\n")) { + for (StringRef x : llvm::split(str, "\n")) { if (nest && x.starts_with("#")) os << "#"; os << x << "\n"; diff --git a/mlir/unittests/Bytecode/BytecodeTest.cpp b/mlir/unittests/Bytecode/BytecodeTest.cpp index a37a2af..0342f29 100644 --- a/mlir/unittests/Bytecode/BytecodeTest.cpp +++ b/mlir/unittests/Bytecode/BytecodeTest.cpp @@ -49,7 +49,6 @@ TEST(Bytecode, MultiModuleWithResource) { std::string buffer; llvm::raw_string_ostream ostream(buffer); ASSERT_TRUE(succeeded(writeBytecodeToFile(module.get(), ostream))); - ostream.flush(); // Create copy of buffer which is aligned to requested resource alignment. 
constexpr size_t kAlignment = 0x20; @@ -139,7 +138,7 @@ TEST(Bytecode, OpWithoutProperties) { ASSERT_TRUE(succeeded(writeBytecodeToFile(op.get(), os))); std::unique_ptr<Block> block = std::make_unique<Block>(); ASSERT_TRUE(succeeded(readBytecodeFile( - llvm::MemoryBufferRef(os.str(), "string-buffer"), block.get(), config))); + llvm::MemoryBufferRef(bytecode, "string-buffer"), block.get(), config))); Operation *roundtripped = &block->front(); EXPECT_EQ(roundtripped->getAttrs().size(), 2u); EXPECT_TRUE(roundtripped->getInherentAttr("inherent_attr") != std::nullopt); diff --git a/mlir/unittests/IR/AttributeTest.cpp b/mlir/unittests/IR/AttributeTest.cpp index e72bfe9..981e919 100644 --- a/mlir/unittests/IR/AttributeTest.cpp +++ b/mlir/unittests/IR/AttributeTest.cpp @@ -498,7 +498,7 @@ TEST(CopyCountAttr, PrintStripped) { os << "|" << res << "|"; res.printStripped(os << "["); os << "]"; - EXPECT_EQ(os.str(), "|#test.copy_count<hello>|[copy_count<hello>]"); + EXPECT_EQ(str, "|#test.copy_count<hello>|[copy_count<hello>]"); } } // namespace diff --git a/mlir/unittests/IR/OpPropertiesTest.cpp b/mlir/unittests/IR/OpPropertiesTest.cpp index 365775d..b4a633a 100644 --- a/mlir/unittests/IR/OpPropertiesTest.cpp +++ b/mlir/unittests/IR/OpPropertiesTest.cpp @@ -191,7 +191,7 @@ TEST(OpPropertiesTest, Properties) { "array = array<i64: 40, 41>, " "b = -4.200000e+01 : f32, " "label = \"bar foo\"}> : () -> ()\n", - os.str().c_str()); + output.c_str()); } // Get a mutable reference to the properties for this operation and modify it // in place one member at a time. @@ -201,40 +201,44 @@ TEST(OpPropertiesTest, Properties) { std::string output; llvm::raw_string_ostream os(output); opWithProp.print(os); - EXPECT_TRUE(StringRef(os.str()).contains("a = 42")); - EXPECT_TRUE(StringRef(os.str()).contains("b = -4.200000e+01")); - EXPECT_TRUE(StringRef(os.str()).contains("array = array<i64: 40, 41>")); - EXPECT_TRUE(StringRef(os.str()).contains("label = \"bar foo\"")); + StringRef view(output); + EXPECT_TRUE(view.contains("a = 42")); + EXPECT_TRUE(view.contains("b = -4.200000e+01")); + EXPECT_TRUE(view.contains("array = array<i64: 40, 41>")); + EXPECT_TRUE(view.contains("label = \"bar foo\"")); } prop.b = 42.; { std::string output; llvm::raw_string_ostream os(output); opWithProp.print(os); - EXPECT_TRUE(StringRef(os.str()).contains("a = 42")); - EXPECT_TRUE(StringRef(os.str()).contains("b = 4.200000e+01")); - EXPECT_TRUE(StringRef(os.str()).contains("array = array<i64: 40, 41>")); - EXPECT_TRUE(StringRef(os.str()).contains("label = \"bar foo\"")); + StringRef view(output); + EXPECT_TRUE(view.contains("a = 42")); + EXPECT_TRUE(view.contains("b = 4.200000e+01")); + EXPECT_TRUE(view.contains("array = array<i64: 40, 41>")); + EXPECT_TRUE(view.contains("label = \"bar foo\"")); } prop.array.push_back(42); { std::string output; llvm::raw_string_ostream os(output); opWithProp.print(os); - EXPECT_TRUE(StringRef(os.str()).contains("a = 42")); - EXPECT_TRUE(StringRef(os.str()).contains("b = 4.200000e+01")); - EXPECT_TRUE(StringRef(os.str()).contains("array = array<i64: 40, 41, 42>")); - EXPECT_TRUE(StringRef(os.str()).contains("label = \"bar foo\"")); + StringRef view(output); + EXPECT_TRUE(view.contains("a = 42")); + EXPECT_TRUE(view.contains("b = 4.200000e+01")); + EXPECT_TRUE(view.contains("array = array<i64: 40, 41, 42>")); + EXPECT_TRUE(view.contains("label = \"bar foo\"")); } prop.label = std::make_shared<std::string>("foo bar"); { std::string output; llvm::raw_string_ostream os(output); opWithProp.print(os); - 
EXPECT_TRUE(StringRef(os.str()).contains("a = 42")); - EXPECT_TRUE(StringRef(os.str()).contains("b = 4.200000e+01")); - EXPECT_TRUE(StringRef(os.str()).contains("array = array<i64: 40, 41, 42>")); - EXPECT_TRUE(StringRef(os.str()).contains("label = \"foo bar\"")); + StringRef view(output); + EXPECT_TRUE(view.contains("a = 42")); + EXPECT_TRUE(view.contains("b = 4.200000e+01")); + EXPECT_TRUE(view.contains("array = array<i64: 40, 41, 42>")); + EXPECT_TRUE(view.contains("label = \"foo bar\"")); } } @@ -297,9 +301,10 @@ TEST(OpPropertiesTest, DefaultValues) { std::string output; llvm::raw_string_ostream os(output); op->print(os); - EXPECT_TRUE(StringRef(os.str()).contains("a = -1")); - EXPECT_TRUE(StringRef(os.str()).contains("b = -1")); - EXPECT_TRUE(StringRef(os.str()).contains("array = array<i64: -33>")); + StringRef view(output); + EXPECT_TRUE(view.contains("a = -1")); + EXPECT_TRUE(view.contains("b = -1")); + EXPECT_TRUE(view.contains("array = array<i64: -33>")); } op->erase(); } @@ -371,9 +376,10 @@ TEST(OpPropertiesTest, getOrAddProperties) { std::string output; llvm::raw_string_ostream os(output); op->print(os); - EXPECT_TRUE(StringRef(os.str()).contains("a = 1")); - EXPECT_TRUE(StringRef(os.str()).contains("b = 2")); - EXPECT_TRUE(StringRef(os.str()).contains("array = array<i64: 3, 4, 5>")); + StringRef view(output); + EXPECT_TRUE(view.contains("a = 1")); + EXPECT_TRUE(view.contains("b = 2")); + EXPECT_TRUE(view.contains("array = array<i64: 3, 4, 5>")); } op->erase(); } @@ -400,8 +406,9 @@ TEST(OpPropertiesTest, withoutPropertiesDiscardableAttrs) { std::string output; llvm::raw_string_ostream os(output); op->print(os); - EXPECT_TRUE(StringRef(os.str()).contains("inherent_attr = 42")); - EXPECT_TRUE(StringRef(os.str()).contains("other_attr = 56")); + StringRef view(output); + EXPECT_TRUE(view.contains("inherent_attr = 42")); + EXPECT_TRUE(view.contains("other_attr = 56")); OwningOpRef<Operation *> reparsed = parseSourceString(os.str(), config); auto trivialHash = [](Value v) { return hash_value(v); }; diff --git a/mlir/unittests/Support/IndentedOstreamTest.cpp b/mlir/unittests/Support/IndentedOstreamTest.cpp index 08a4de5..804a7e4 100644 --- a/mlir/unittests/Support/IndentedOstreamTest.cpp +++ b/mlir/unittests/Support/IndentedOstreamTest.cpp @@ -18,7 +18,7 @@ TEST(FormatTest, SingleLine) { raw_indented_ostream ros(os); ros << 10; ros.flush(); - EXPECT_THAT(os.str(), StrEq("10")); + EXPECT_THAT(str, StrEq("10")); } TEST(FormatTest, SimpleMultiLine) { @@ -31,7 +31,7 @@ TEST(FormatTest, SimpleMultiLine) { ros << "c"; ros << "\n"; ros.flush(); - EXPECT_THAT(os.str(), StrEq("ab\nc\n")); + EXPECT_THAT(str, StrEq("ab\nc\n")); } TEST(FormatTest, SimpleMultiLineIndent) { @@ -44,7 +44,7 @@ TEST(FormatTest, SimpleMultiLineIndent) { ros << "c"; ros << "\n"; ros.flush(); - EXPECT_THAT(os.str(), StrEq(" a b\n c\n")); + EXPECT_THAT(str, StrEq(" a b\n c\n")); } TEST(FormatTest, SingleRegion) { @@ -71,7 +71,7 @@ TEST(FormatTest, SingleRegion) { inner inner } after)"; - EXPECT_THAT(os.str(), StrEq(expected)); + EXPECT_THAT(str, StrEq(expected)); // Repeat the above with inline form. 
str.clear(); @@ -106,7 +106,7 @@ TEST(FormatTest, Reindent) { )"; - EXPECT_THAT(os.str(), StrEq(expected)); + EXPECT_THAT(str, StrEq(expected)); } TEST(FormatTest, ReindentLineEndings) { @@ -122,5 +122,5 @@ TEST(FormatTest, ReindentLineEndings) { ros.printReindented(desc); ros.flush(); const auto *expected = "First line\r\n second line"; - EXPECT_THAT(os.str(), StrEq(expected)); + EXPECT_THAT(str, StrEq(expected)); } diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel index 834d309..664bb62 100644 --- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel @@ -2174,7 +2174,12 @@ libc_math_function( ], ) -libc_math_function(name = "fmul") +libc_math_function( + name = "fmul", + additional_deps = [ + ":__support_fputil_double_double", + ], +) libc_math_function(name = "fmull") diff --git a/utils/bazel/llvm-project-overlay/lld/BUILD.bazel b/utils/bazel/llvm-project-overlay/lld/BUILD.bazel index db58049..a08ce54 100644 --- a/utils/bazel/llvm-project-overlay/lld/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/lld/BUILD.bazel @@ -227,6 +227,7 @@ cc_library( "//llvm:BinaryFormat", "//llvm:BitReader", "//llvm:BitWriter", + "//llvm:CGData", "//llvm:Core", "//llvm:DebugInfoDWARF", "//llvm:Demangle", diff --git a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel index 9402fd1..bbb0435 100644 --- a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel @@ -1408,7 +1408,7 @@ cc_library( "lib/Transforms/Instrumentation/*.inc", ]), hdrs = glob(["include/llvm/Transforms/Instrumentation/*.h"]) + [ - "include/llvm/Transforms/Instrumentation.h", + "include/llvm/Transforms/Utils/Instrumentation.h", ], copts = llvm_copts, deps = [ diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel index 46ebe9b..034b875 100644 --- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel @@ -3353,6 +3353,7 @@ cc_library( ":DialectUtils", ":IR", ":InferTypeOpInterface", + ":InliningUtils", ":MeshIncGen", ":SideEffectInterfaces", ":Support",