172 files changed, 6469 insertions, 1710 deletions
diff --git a/bolt/include/bolt/Core/BinaryContext.h b/bolt/include/bolt/Core/BinaryContext.h index 8960b19..5cbc28f 100644 --- a/bolt/include/bolt/Core/BinaryContext.h +++ b/bolt/include/bolt/Core/BinaryContext.h @@ -781,11 +781,6 @@ public: uint64_t PseudoProbeLooseMatchedSampleCount{0}; /// the count of call matched samples uint64_t CallMatchedSampleCount{0}; - /// the number of stale functions that have matching number of blocks in - /// the profile - uint64_t NumStaleFuncsWithEqualBlockCount{0}; - /// the number of blocks that have matching size but a differing hash - uint64_t NumStaleBlocksWithEqualIcount{0}; } Stats; // Original binary execution count stats. diff --git a/bolt/lib/Passes/BinaryPasses.cpp b/bolt/lib/Passes/BinaryPasses.cpp index e1a1856..1d187de 100644 --- a/bolt/lib/Passes/BinaryPasses.cpp +++ b/bolt/lib/Passes/BinaryPasses.cpp @@ -1508,12 +1508,6 @@ Error PrintProgramStats::runOnFunctions(BinaryContext &BC) { if (NumAllStaleFunctions) { const float PctStale = NumAllStaleFunctions / (float)NumAllProfiledFunctions * 100.0f; - const float PctStaleFuncsWithEqualBlockCount = - (float)BC.Stats.NumStaleFuncsWithEqualBlockCount / - NumAllStaleFunctions * 100.0f; - const float PctStaleBlocksWithEqualIcount = - (float)BC.Stats.NumStaleBlocksWithEqualIcount / - BC.Stats.NumStaleBlocks * 100.0f; auto printErrorOrWarning = [&]() { if (PctStale > opts::StaleThreshold) BC.errs() << "BOLT-ERROR: "; @@ -1536,17 +1530,6 @@ Error PrintProgramStats::runOnFunctions(BinaryContext &BC) { << "%) belong to functions with invalid" " (possibly stale) profile.\n"; } - BC.outs() << "BOLT-INFO: " << BC.Stats.NumStaleFuncsWithEqualBlockCount - << " stale function" - << (BC.Stats.NumStaleFuncsWithEqualBlockCount == 1 ? "" : "s") - << format(" (%.1f%% of all stale)", - PctStaleFuncsWithEqualBlockCount) - << " have matching block count.\n"; - BC.outs() << "BOLT-INFO: " << BC.Stats.NumStaleBlocksWithEqualIcount - << " stale block" - << (BC.Stats.NumStaleBlocksWithEqualIcount == 1 ? 
"" : "s") - << format(" (%.1f%% of all stale)", PctStaleBlocksWithEqualIcount) - << " have matching icount.\n"; if (PctStale > opts::StaleThreshold) { return createFatalBOLTError( Twine("BOLT-ERROR: stale functions exceed specified threshold of ") + diff --git a/bolt/lib/Profile/YAMLProfileReader.cpp b/bolt/lib/Profile/YAMLProfileReader.cpp index 086e47b..f0f87f9 100644 --- a/bolt/lib/Profile/YAMLProfileReader.cpp +++ b/bolt/lib/Profile/YAMLProfileReader.cpp @@ -350,9 +350,6 @@ bool YAMLProfileReader::parseFunctionProfile( << MismatchedCalls << " calls, and " << MismatchedEdges << " edges in profile did not match function " << BF << '\n'; - if (YamlBF.NumBasicBlocks != BF.size()) - ++BC.Stats.NumStaleFuncsWithEqualBlockCount; - if (!opts::InferStaleProfile) return false; ArrayRef<ProbeMatchSpec> ProbeMatchSpecs; diff --git a/clang-tools-extra/clang-tidy/ClangTidyOptions.cpp b/clang-tools-extra/clang-tidy/ClangTidyOptions.cpp index 21455db..c4b47a4 100644 --- a/clang-tools-extra/clang-tidy/ClangTidyOptions.cpp +++ b/clang-tools-extra/clang-tidy/ClangTidyOptions.cpp @@ -247,7 +247,7 @@ ClangTidyOptions ClangTidyOptions::getDefaults() { Options.WarningsAsErrors = ""; Options.HeaderFileExtensions = {"", "h", "hh", "hpp", "hxx"}; Options.ImplementationFileExtensions = {"c", "cc", "cpp", "cxx"}; - Options.HeaderFilterRegex = ""; + Options.HeaderFilterRegex = ".*"; Options.ExcludeHeaderFilterRegex = ""; Options.SystemHeaders = false; Options.FormatStyle = "none"; diff --git a/clang-tools-extra/clang-tidy/tool/ClangTidyMain.cpp b/clang-tools-extra/clang-tidy/tool/ClangTidyMain.cpp index 64157f5..1ae8756 100644 --- a/clang-tools-extra/clang-tidy/tool/ClangTidyMain.cpp +++ b/clang-tools-extra/clang-tidy/tool/ClangTidyMain.cpp @@ -93,7 +93,7 @@ Configuration files: WarningsAsErrors: '' HeaderFileExtensions: ['', 'h','hh','hpp','hxx'] ImplementationFileExtensions: ['c','cc','cpp','cxx'] - HeaderFilterRegex: '' + HeaderFilterRegex: '.*' FormatStyle: none InheritParentConfig: true User: user @@ -132,14 +132,16 @@ file, if any. static cl::opt<std::string> HeaderFilter("header-filter", desc(R"( Regular expression matching the names of the -headers to output diagnostics from. Diagnostics +headers to output diagnostics from. The default +value is '.*', i.e. diagnostics from all non-system +headers are displayed by default. Diagnostics from the main file of each translation unit are always displayed. Can be used together with -line-filter. This option overrides the 'HeaderFilterRegex' option in .clang-tidy file, if any. )"), - cl::init(""), + cl::init(".*"), cl::cat(ClangTidyCategory)); static cl::opt<std::string> ExcludeHeaderFilter("exclude-header-filter", @@ -379,9 +381,9 @@ static void printStats(const ClangTidyStats &Stats) { << " with check filters"; llvm::errs() << ").\n"; if (Stats.ErrorsIgnoredNonUserCode) - llvm::errs() << "Use -header-filter=.* to display errors from all " - "non-system headers. Use -system-headers to display " - "errors from system headers as well.\n"; + llvm::errs() << "Use -header-filter=.* or leave it as default to display " + "errors from all non-system headers. 
Use -system-headers " "to display errors from system headers as well.\n"; } } diff --git a/clang-tools-extra/clang-tidy/utils/FixItHintUtils.cpp b/clang-tools-extra/clang-tidy/utils/FixItHintUtils.cpp index 086c7f3..b30c83e 100644 --- a/clang-tools-extra/clang-tidy/utils/FixItHintUtils.cpp +++ b/clang-tools-extra/clang-tidy/utils/FixItHintUtils.cpp @@ -21,6 +21,11 @@ FixItHint changeVarDeclToReference(const VarDecl &Var, ASTContext &Context) { SourceLocation AmpLocation = Var.getLocation(); auto Token = utils::lexer::getPreviousToken( AmpLocation, Context.getSourceManager(), Context.getLangOpts()); + + // For parameter packs, the '&' must go before the '...' token + if (Token.is(tok::ellipsis)) + return FixItHint::CreateInsertion(Token.getLocation(), "&"); + if (!Token.is(tok::unknown)) AmpLocation = Lexer::getLocForEndOfToken(Token.getLocation(), 0, Context.getSourceManager(), diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst index 915b793..8f4be0d 100644 --- a/clang-tools-extra/docs/ReleaseNotes.rst +++ b/clang-tools-extra/docs/ReleaseNotes.rst @@ -70,6 +70,11 @@ Potentially Breaking Changes :doc:`bugprone-signed-char-misuse <clang-tidy/checks/bugprone/signed-char-misuse>` +- :program:`clang-tidy` now displays warnings from all non-system headers by + default. Previously, users had to explicitly opt in to header warnings using + `-header-filter='.*'`. To disable warnings from non-system headers, set `-header-filter` + to an empty string. + Improvements to clangd ---------------------- @@ -132,6 +137,11 @@ Improvements to clang-tidy when run over C files. If ``-std`` is not specified, it defaults to ``c99-or-later``. +- :program:`clang-tidy` now displays warnings from all non-system headers by + default. Previously, users had to explicitly opt in to header warnings using + `-header-filter='.*'`. To disable warnings from non-system headers, set `-header-filter` + to an empty string. + - :program:`clang-tidy` no longer attempts to analyze code from system headers by default, greatly improving performance. This behavior is disabled if the `SystemHeaders` option is enabled. @@ -407,7 +417,8 @@ Changes in existing checks - Improved :doc:`performance-unnecessary-value-param <clang-tidy/checks/performance/unnecessary-value-param>` by printing - the type of the diagnosed variable. + the type of the diagnosed variable and correctly generating fix-it hints for + parameter-pack arguments. - Improved :doc:`portability-template-virtual-member-function <clang-tidy/checks/portability/template-virtual-member-function>` check to diff --git a/clang-tools-extra/docs/clang-tidy/index.rst b/clang-tools-extra/docs/clang-tidy/index.rst index bd2c40e..6ff82bf 100644 --- a/clang-tools-extra/docs/clang-tidy/index.rst +++ b/clang-tools-extra/docs/clang-tidy/index.rst @@ -215,7 +215,9 @@ An overview of all the command-line options: This option overrides the 'FormatStyle` option in .clang-tidy file, if any. --header-filter=<string> - Regular expression matching the names of the - headers to output diagnostics from. Diagnostics + headers to output diagnostics from. The default + value is '.*', i.e. diagnostics from all non-system + headers are displayed by default. Diagnostics from the main file of each translation unit are always displayed. Can be used together with -line-filter. 
@@ -338,7 +340,7 @@ An overview of all the command-line options: WarningsAsErrors: '' HeaderFileExtensions: ['', 'h','hh','hpp','hxx'] ImplementationFileExtensions: ['c','cc','cpp','cxx'] - HeaderFilterRegex: '' + HeaderFilterRegex: '.*' FormatStyle: none InheritParentConfig: true User: user diff --git a/clang-tools-extra/test/clang-tidy/checkers/abseil/no-internal-dependencies.cpp b/clang-tools-extra/test/clang-tidy/checkers/abseil/no-internal-dependencies.cpp index 2949d7f..f6eb7c5 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/abseil/no-internal-dependencies.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/abseil/no-internal-dependencies.cpp @@ -1,4 +1,4 @@ -// RUN: %check_clang_tidy %s abseil-no-internal-dependencies %t, -- -- -I %S/Inputs +// RUN: %check_clang_tidy %s abseil-no-internal-dependencies %t, -- -header-filter='' -- -I %S/Inputs // RUN: clang-tidy -checks='-*, abseil-no-internal-dependencies' -header-filter='.*' %s -- -I %S/Inputs 2>&1 | FileCheck %s #include "absl/strings/internal-file.h" diff --git a/clang-tools-extra/test/clang-tidy/checkers/abseil/no-namespace.cpp b/clang-tools-extra/test/clang-tidy/checkers/abseil/no-namespace.cpp index 78821c3..c8a5752 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/abseil/no-namespace.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/abseil/no-namespace.cpp @@ -1,4 +1,4 @@ -// RUN: %check_clang_tidy %s abseil-no-namespace %t -- -- -I %S/Inputs +// RUN: %check_clang_tidy %s abseil-no-namespace %t -- -header-filter='' -- -I %S/Inputs // RUN: clang-tidy -checks='-*, abseil-no-namespace' -header-filter='.*' %s -- -I %S/Inputs 2>&1 | FileCheck %s /// Warning will not be triggered on internal Abseil code that is included. diff --git a/clang-tools-extra/test/clang-tidy/checkers/bugprone/reserved-identifier.cpp b/clang-tools-extra/test/clang-tidy/checkers/bugprone/reserved-identifier.cpp index 0f36efe..b17e890 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/bugprone/reserved-identifier.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/reserved-identifier.cpp @@ -1,8 +1,9 @@ -// RUN: %check_clang_tidy %s bugprone-reserved-identifier %t -- -- \ +// RUN: %check_clang_tidy %s bugprone-reserved-identifier %t -- \ +// RUN: -header-filter='' -- \ // RUN: -I%S/Inputs/reserved-identifier \ // RUN: -isystem %S/Inputs/reserved-identifier/system -// no warnings expected without -header-filter= +// no warnings expected with -header-filter='' #include "user-header.h" #include <system-header.h> diff --git a/clang-tools-extra/test/clang-tidy/checkers/google/upgrade-googletest-case.cpp b/clang-tools-extra/test/clang-tidy/checkers/google/upgrade-googletest-case.cpp index edb11b9..5b30541 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/google/upgrade-googletest-case.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/google/upgrade-googletest-case.cpp @@ -1,5 +1,5 @@ -// RUN: %check_clang_tidy %s google-upgrade-googletest-case %t -- -- -I%S/Inputs -// RUN: %check_clang_tidy -check-suffix=NOSUITE %s google-upgrade-googletest-case %t -- -- -DNOSUITE -I%S/Inputs/gtest/nosuite +// RUN: %check_clang_tidy %s google-upgrade-googletest-case %t -- -- -isystem%S/Inputs +// RUN: %check_clang_tidy -check-suffix=NOSUITE %s google-upgrade-googletest-case %t -- -- -DNOSUITE -isystem%S/Inputs/gtest/nosuite #include "gtest/gtest.h" diff --git a/clang-tools-extra/test/clang-tidy/checkers/modernize/replace-auto-ptr.cpp b/clang-tools-extra/test/clang-tidy/checkers/modernize/replace-auto-ptr.cpp index 2281c1a..371f3dd 100644 --- 
a/clang-tools-extra/test/clang-tidy/checkers/modernize/replace-auto-ptr.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/modernize/replace-auto-ptr.cpp @@ -1,4 +1,4 @@ -// RUN: %check_clang_tidy %s modernize-replace-auto-ptr %t -- -- -I %S/Inputs/replace-auto-ptr +// RUN: %check_clang_tidy %s modernize-replace-auto-ptr %t -- -- -isystem %S/Inputs/replace-auto-ptr // CHECK-FIXES: #include <utility> diff --git a/clang-tools-extra/test/clang-tidy/checkers/modernize/use-using.cpp b/clang-tools-extra/test/clang-tidy/checkers/modernize/use-using.cpp index 8288f39..5b8eca2 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/modernize/use-using.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/modernize/use-using.cpp @@ -1,4 +1,4 @@ -// RUN: %check_clang_tidy %s modernize-use-using %t -- -- -fno-delayed-template-parsing -I %S/Inputs/use-using/ +// RUN: %check_clang_tidy %s modernize-use-using %t -- -- -fno-delayed-template-parsing -isystem %S/Inputs/use-using/ typedef int Type; // CHECK-MESSAGES: :[[@LINE-1]]:1: warning: use 'using' instead of 'typedef' [modernize-use-using] diff --git a/clang-tools-extra/test/clang-tidy/checkers/performance/unnecessary-value-param-templates.cpp b/clang-tools-extra/test/clang-tidy/checkers/performance/unnecessary-value-param-templates.cpp index 688c79b..61758c5 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/performance/unnecessary-value-param-templates.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/performance/unnecessary-value-param-templates.cpp @@ -96,3 +96,34 @@ void lambdaNonConstAutoValue() { }; fn(ExpensiveToCopyType()); } + +template <typename... Args> +void ParameterPack(Args... args) { + // CHECK-MESSAGES: [[@LINE-1]]:28: warning: the parameter 'args' of type 'ExpensiveToCopyType' + // CHECK-FIXES: void ParameterPack(const Args&... args) { +} + +template <typename... Args> +void ParameterPackConst(Args const... args) { + // CHECK-MESSAGES: [[@LINE-1]]:39: warning: the const qualified parameter 'args' of type 'const ExpensiveToCopyType' + // CHECK-FIXES: void ParameterPackConst(Args const&... args) { +} + +template <typename... Args> +void ParameterPackWithParams(const ExpensiveToCopyType E1, ExpensiveToCopyType E2, Args... args) { + // CHECK-MESSAGES: [[@LINE-1]]:56: warning: the const qualified parameter 'E1' + // CHECK-MESSAGES: [[@LINE-2]]:80: warning: the parameter 'E2' + // CHECK-MESSAGES: [[@LINE-3]]:92: warning: the parameter 'args' + // CHECK-FIXES: void ParameterPackWithParams(const ExpensiveToCopyType& E1, const ExpensiveToCopyType& E2, const Args&... args) { +} + +template <typename... Args> +void PackWithNonExpensive(int x, Args... 
args) {} + +void instantiatedParameterPack() { + ExpensiveToCopyType E; + ParameterPack(E); + ParameterPackConst(E); + ParameterPackWithParams(E, E, E); + PackWithNonExpensive(5, 5); +} diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/duplicate-include.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/duplicate-include.cpp index 223f077..c452f69 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/readability/duplicate-include.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/readability/duplicate-include.cpp @@ -1,4 +1,6 @@ -// RUN: %check_clang_tidy %s readability-duplicate-include %t -- -- -isystem %S/Inputs/duplicate-include/system -I %S/Inputs/duplicate-include +// RUN: %check_clang_tidy %s readability-duplicate-include %t -- \ +// RUN: -header-filter='' \ +// RUN: -- -isystem %S/Inputs/duplicate-include/system -I %S/Inputs/duplicate-include int a; #include <string.h> diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming.cpp index 9180733..1d06df3 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming.cpp @@ -86,7 +86,9 @@ // RUN: readability-identifier-naming.LocalPointerPrefix: 'l_', \ // RUN: readability-identifier-naming.LocalConstantPointerCase: CamelCase, \ // RUN: readability-identifier-naming.LocalConstantPointerPrefix: 'lc_', \ -// RUN: }}' -- -fno-delayed-template-parsing -Dbad_macro \ +// RUN: }}' \ +// RUN: -header-filter='' \ +// RUN: -- -fno-delayed-template-parsing -Dbad_macro \ // RUN: -I%S/Inputs/identifier-naming \ // RUN: -isystem %S/Inputs/identifier-naming/system @@ -95,8 +97,7 @@ #include <system-header.h> #include <coroutines.h> #include "user-header.h" -// NO warnings or fixes expected from declarations within header files without -// the -header-filter= option +// NO warnings or fixes expected from declarations with the -header-filter='' option namespace FOO_NS { // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: invalid case style for namespace 'FOO_NS' [readability-identifier-naming] diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/default-header-filter.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/default-header-filter.cpp new file mode 100644 index 0000000..489b302 --- /dev/null +++ b/clang-tools-extra/test/clang-tidy/infrastructure/default-header-filter.cpp @@ -0,0 +1,27 @@ + +// RUN: clang-tidy -checks='-*,google-explicit-constructor' --config='{}' %s -- -I %S/Inputs/file-filter -isystem %S/Inputs/file-filter/system 2>&1 | FileCheck --check-prefix=CHECK-DEFAULT %s +// RUN: clang-tidy -checks='-*,google-explicit-constructor' --config='{}' -header-filter='' %s -- -I %S/Inputs/file-filter -isystem %S/Inputs/file-filter/system 2>&1 | FileCheck --check-prefix=CHECK-EMPTY %s +// RUN: clang-tidy -checks='-*,google-explicit-constructor' --config='{}' -header-filter='.*' %s -- -I %S/Inputs/file-filter -isystem %S/Inputs/file-filter/system 2>&1 | FileCheck --check-prefix=CHECK-EXPLICIT %s +// RUN: clang-tidy -checks='-*,google-explicit-constructor' --config='{}' %s -- -I %S/Inputs/file-filter -isystem %S/Inputs/file-filter/system 2>&1 | FileCheck --check-prefix=CHECK-NO-SYSTEM %s +// RUN: clang-tidy -checks='-*,google-explicit-constructor' --config='{}' -system-headers %s -- -I %S/Inputs/file-filter -isystem %S/Inputs/file-filter/system 2>&1 | FileCheck --check-prefix=CHECK-WITH-SYSTEM %s + 
+#include "header1.h" +// CHECK-DEFAULT: header1.h:1:12: warning: single-argument constructors must be marked explicit +// CHECK-EMPTY-NOT: header1.h:1:12: warning: +// CHECK-EXPLICIT: header1.h:1:12: warning: single-argument constructors must be marked explicit +// CHECK-NO-SYSTEM: header1.h:1:12: warning: single-argument constructors must be marked explicit +// CHECK-WITH-SYSTEM-DAG: header1.h:1:12: warning: single-argument constructors must be marked explicit + +#include <system-header.h> +// CHECK-DEFAULT-NOT: system-header.h:1:12: warning: +// CHECK-EMPTY-NOT: system-header.h:1:12: warning: +// CHECK-EXPLICIT-NOT: system-header.h:1:12: warning: +// CHECK-NO-SYSTEM-NOT: system-header.h:1:12: warning: +// CHECK-WITH-SYSTEM-DAG: system-header.h:1:12: warning: single-argument constructors must be marked explicit + +class A { A(int); }; +// CHECK-DEFAULT: :[[@LINE-1]]:11: warning: single-argument constructors must be marked explicit +// CHECK-EMPTY: :[[@LINE-2]]:11: warning: single-argument constructors must be marked explicit +// CHECK-EXPLICIT: :[[@LINE-3]]:11: warning: single-argument constructors must be marked explicit +// CHECK-NO-SYSTEM: :[[@LINE-4]]:11: warning: single-argument constructors must be marked explicit +// CHECK-WITH-SYSTEM: :[[@LINE-5]]:11: warning: single-argument constructors must be marked explicit diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/file-filter.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/file-filter.cpp index d9ec104..485e9fb 100644 --- a/clang-tools-extra/test/clang-tidy/infrastructure/file-filter.cpp +++ b/clang-tools-extra/test/clang-tidy/infrastructure/file-filter.cpp @@ -66,7 +66,7 @@ class A { A(int); }; // CHECK4-NOT: warning: // CHECK4-QUIET-NOT: warning: -// CHECK: Use -header-filter=.* to display errors from all non-system headers. +// CHECK: Use -header-filter=.* or leave it as default to display errors from all non-system headers. // CHECK-QUIET-NOT: Suppressed // CHECK2-QUIET-NOT: Suppressed // CHECK3: Use -header-filter=.* {{.*}} diff --git a/clang/docs/ThreadSafetyAnalysis.rst b/clang/docs/ThreadSafetyAnalysis.rst index 853a8fa..d0f96f5 100644 --- a/clang/docs/ThreadSafetyAnalysis.rst +++ b/clang/docs/ThreadSafetyAnalysis.rst @@ -118,7 +118,7 @@ require exclusive access, while read operations require only shared access. At any given moment during program execution, a thread holds a specific set of capabilities (e.g. the set of mutexes that it has locked.) These act like keys or tokens that allow the thread to access a given resource. Just like physical -security keys, a thread cannot make copy of a capability, nor can it destroy +security keys, a thread cannot make a copy of a capability, nor can it destroy one. A thread can only release a capability to another thread, or acquire one from another thread. The annotations are deliberately agnostic about the exact mechanism used to acquire and release capabilities; it assumes that the @@ -131,7 +131,7 @@ by calculating an approximation of that set, called the *capability environment*. The capability environment is calculated for every program point, and describes the set of capabilities that are statically known to be held, or not held, at that particular point. This environment is a conservative -approximation of the full set of capabilities that will actually held by a +approximation of the full set of capabilities that will actually be held by a thread at run-time. @@ -369,7 +369,7 @@ thread-safe, but too complicated for the analysis to understand. 
Reasons for void unsafeIncrement() NO_THREAD_SAFETY_ANALYSIS { a++; } }; -Unlike the other attributes, NO_THREAD_SAFETY_ANALYSIS is not part of the +Unlike the other attributes, ``NO_THREAD_SAFETY_ANALYSIS`` is not part of the interface of a function, and should thus be placed on the function definition (in the ``.cc`` or ``.cpp`` file) rather than on the function declaration (in the header). @@ -509,7 +509,7 @@ ASSERT_CAPABILITY(...) and ASSERT_SHARED_CAPABILITY(...) *Previously:* ``ASSERT_EXCLUSIVE_LOCK``, ``ASSERT_SHARED_LOCK`` These are attributes on a function or method which asserts the calling thread -already holds the given capability, for example by performing a run-time test +already holds the given capability, for example, by performing a run-time test and terminating if the capability is not held. Presence of this annotation causes the analysis to assume the capability is held after calls to the annotated function. See :ref:`mutexheader`, below, for example uses. @@ -554,19 +554,19 @@ Negative Capabilities ===================== Thread Safety Analysis is designed to prevent both race conditions and -deadlock. The GUARDED_BY and REQUIRES attributes prevent race conditions, by +deadlock. The ``GUARDED_BY`` and ``REQUIRES`` attributes prevent race conditions, by ensuring that a capability is held before reading or writing to guarded data, -and the EXCLUDES attribute prevents deadlock, by making sure that a mutex is +and the ``EXCLUDES`` attribute prevents deadlock, by making sure that a mutex is *not* held. -However, EXCLUDES is an optional attribute, and does not provide the same -safety guarantee as REQUIRES. In particular: +However, ``EXCLUDES`` is an optional attribute, and does not provide the same +safety guarantee as ``REQUIRES``. In particular: * A function which acquires a capability does not have to exclude it. * A function which calls a function that excludes a capability does not - have transitively exclude that capability. + have to transitively exclude that capability. -As a result, EXCLUDES can easily produce false negatives: +As a result, ``EXCLUDES`` can easily produce false negatives: .. code-block:: c++ @@ -594,8 +594,8 @@ As a result, EXCLUDES can easily produce false negatives: }; -Negative requirements are an alternative EXCLUDES that provide -a stronger safety guarantee. A negative requirement uses the REQUIRES +Negative requirements are an alternative to ``EXCLUDES`` that provide +a stronger safety guarantee. A negative requirement uses the ``REQUIRES`` attribute, in conjunction with the ``!`` operator, to indicate that a capability should *not* be held. @@ -642,7 +642,7 @@ Frequently Asked Questions (A) Attributes are part of the formal interface of a function, and should always go in the header, where they are visible to anything that includes -the header. Attributes in the .cpp file are not visible outside of the +the header. Attributes in the ``.cpp`` file are not visible outside of the immediate translation unit, which leads to false negatives and false positives. @@ -684,7 +684,7 @@ Private Mutexes --------------- Good software engineering practice dictates that mutexes should be private -members, because the locking mechanism used by a thread-safe class is part of +members because the locking mechanism used by a thread-safe class is part of its internal implementation. However, private mutexes can sometimes leak into the public interface of a class. 
Thread safety attributes follow normal C++ access restrictions, so if ``mu`` diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td index a2c2021..2b400b0 100644 --- a/clang/include/clang/Basic/Builtins.td +++ b/clang/include/clang/Basic/Builtins.td @@ -5030,6 +5030,12 @@ def HLSLWaveActiveMax : LangBuiltin<"HLSL_LANG"> { let Prototype = "void (...)"; } +def HLSLWaveActiveMin : LangBuiltin<"HLSL_LANG"> { + let Spellings = ["__builtin_hlsl_wave_active_min"]; + let Attributes = [NoThrow, Const]; + let Prototype = "void (...)"; +} + def HLSLWaveActiveSum : LangBuiltin<"HLSL_LANG"> { let Spellings = ["__builtin_hlsl_wave_active_sum"]; let Attributes = [NoThrow, Const]; diff --git a/clang/include/clang/Frontend/TextDiagnostic.h b/clang/include/clang/Frontend/TextDiagnostic.h index e2e88d4..10028186d 100644 --- a/clang/include/clang/Frontend/TextDiagnostic.h +++ b/clang/include/clang/Frontend/TextDiagnostic.h @@ -16,10 +16,12 @@ #define LLVM_CLANG_FRONTEND_TEXTDIAGNOSTIC_H #include "clang/Frontend/DiagnosticRenderer.h" -#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/FormattedStream.h" namespace clang { +using llvm::formatted_raw_ostream; + /// Class to encapsulate the logic for formatting and printing a textual /// diagnostic message. /// @@ -33,7 +35,7 @@ namespace clang { /// DiagnosticClient is implemented through this class as is diagnostic /// printing coming out of libclang. class TextDiagnostic : public DiagnosticRenderer { - raw_ostream &OS; + formatted_raw_ostream OS; const Preprocessor *PP; public: @@ -47,7 +49,7 @@ public: unsigned End; enum llvm::raw_ostream::Colors Color; StyleRange(unsigned S, unsigned E, enum llvm::raw_ostream::Colors C) - : Start(S), End(E), Color(C){}; + : Start(S), End(E), Color(C) {}; }; /// Print the diagonstic level to a raw_ostream. 
diff --git a/clang/include/clang/Parse/Parser.h b/clang/include/clang/Parse/Parser.h index 0d2316f..dad8efd0 100644 --- a/clang/include/clang/Parse/Parser.h +++ b/clang/include/clang/Parse/Parser.h @@ -7677,7 +7677,7 @@ private: /// [GNU] asm-clobbers: /// asm-string-literal /// asm-clobbers ',' asm-string-literal - /// \endverbatim + /// \endverbatim /// StmtResult ParseAsmStatement(bool &msAsm); diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp index ab6b3ed..b3ab82d 100644 --- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp +++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp @@ -859,7 +859,7 @@ static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC, APSInt RHS = popToAPSInt(S.Stk, RHST); APSInt LHS = popToAPSInt(S.Stk, LHST); - if (CarryOutPtr.isDummy()) + if (CarryOutPtr.isDummy() || !CarryOutPtr.isBlockPointer()) return false; APSInt CarryOut; diff --git a/clang/lib/Basic/Targets/BPF.cpp b/clang/lib/Basic/Targets/BPF.cpp index 0411bcc..8de1083 100644 --- a/clang/lib/Basic/Targets/BPF.cpp +++ b/clang/lib/Basic/Targets/BPF.cpp @@ -75,6 +75,7 @@ void BPFTargetInfo::getTargetDefines(const LangOptions &Opts, Builder.defineMacro("__BPF_FEATURE_GOTOL"); Builder.defineMacro("__BPF_FEATURE_ST"); Builder.defineMacro("__BPF_FEATURE_LOAD_ACQ_STORE_REL"); + Builder.defineMacro("__BPF_FEATURE_GOTOX"); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 50d585d..e5066fa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -108,11 +108,11 @@ public: cir::LongDoubleType getLongDoubleTy(const llvm::fltSemantics &format) const { if (&format == &llvm::APFloat::IEEEdouble()) - return cir::LongDoubleType::get(getContext(), typeCache.DoubleTy); + return cir::LongDoubleType::get(getContext(), typeCache.doubleTy); if (&format == &llvm::APFloat::x87DoubleExtended()) - return cir::LongDoubleType::get(getContext(), typeCache.FP80Ty); + return cir::LongDoubleType::get(getContext(), typeCache.fP80Ty); if (&format == &llvm::APFloat::IEEEquad()) - return cir::LongDoubleType::get(getContext(), typeCache.FP128Ty); + return cir::LongDoubleType::get(getContext(), typeCache.fP128Ty); if (&format == &llvm::APFloat::PPCDoubleDouble()) llvm_unreachable("NYI: PPC double-double format for long double"); llvm_unreachable("Unsupported format for long double"); @@ -258,17 +258,17 @@ public: } } - cir::VoidType getVoidTy() { return typeCache.VoidTy; } + cir::VoidType getVoidTy() { return typeCache.voidTy; } - cir::IntType getSInt8Ty() { return typeCache.SInt8Ty; } - cir::IntType getSInt16Ty() { return typeCache.SInt16Ty; } - cir::IntType getSInt32Ty() { return typeCache.SInt32Ty; } - cir::IntType getSInt64Ty() { return typeCache.SInt64Ty; } + cir::IntType getSInt8Ty() { return typeCache.sInt8Ty; } + cir::IntType getSInt16Ty() { return typeCache.sInt16Ty; } + cir::IntType getSInt32Ty() { return typeCache.sInt32Ty; } + cir::IntType getSInt64Ty() { return typeCache.sInt64Ty; } - cir::IntType getUInt8Ty() { return typeCache.UInt8Ty; } - cir::IntType getUInt16Ty() { return typeCache.UInt16Ty; } - cir::IntType getUInt32Ty() { return typeCache.UInt32Ty; } - cir::IntType getUInt64Ty() { return typeCache.UInt64Ty; } + cir::IntType getUInt8Ty() { return typeCache.uInt8Ty; } + cir::IntType getUInt16Ty() { return typeCache.uInt16Ty; } + cir::IntType getUInt32Ty() { return typeCache.uInt32Ty; } + cir::IntType getUInt64Ty() { return typeCache.uInt64Ty; } cir::ConstantOp getConstInt(mlir::Location loc, 
llvm::APSInt intVal); @@ -280,21 +280,21 @@ public: llvm::APFloat fpVal); bool isInt8Ty(mlir::Type i) { - return i == typeCache.UInt8Ty || i == typeCache.SInt8Ty; + return i == typeCache.uInt8Ty || i == typeCache.sInt8Ty; } bool isInt16Ty(mlir::Type i) { - return i == typeCache.UInt16Ty || i == typeCache.SInt16Ty; + return i == typeCache.uInt16Ty || i == typeCache.sInt16Ty; } bool isInt32Ty(mlir::Type i) { - return i == typeCache.UInt32Ty || i == typeCache.SInt32Ty; + return i == typeCache.uInt32Ty || i == typeCache.sInt32Ty; } bool isInt64Ty(mlir::Type i) { - return i == typeCache.UInt64Ty || i == typeCache.SInt64Ty; + return i == typeCache.uInt64Ty || i == typeCache.sInt64Ty; } bool isInt(mlir::Type i) { return mlir::isa<cir::IntType>(i); } // Fetch the type representing a pointer to unsigned int8 values. - cir::PointerType getUInt8PtrTy() { return typeCache.UInt8PtrTy; } + cir::PointerType getUInt8PtrTy() { return typeCache.uInt8PtrTy; } /// Get a CIR anonymous record type. cir::RecordType getAnonRecordTy(llvm::ArrayRef<mlir::Type> members, diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 5046e09..a829678 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -362,7 +362,7 @@ static Address applyNonVirtualAndVirtualOffset( // not bytes. So the pointer must be cast to a byte pointer and back. mlir::Value ptr = addr.getPointer(); - mlir::Type charPtrType = cgf.cgm.UInt8PtrTy; + mlir::Type charPtrType = cgf.cgm.uInt8PtrTy; mlir::Value charPtr = cgf.getBuilder().createBitcast(ptr, charPtrType); mlir::Value adjusted = cir::PtrStrideOp::create( cgf.getBuilder(), loc, charPtrType, charPtr, baseOffset); @@ -1105,7 +1105,7 @@ mlir::Value CIRGenFunction::getVTTParameter(GlobalDecl gd, bool forVirtualBase, // We're the complete constructor, so get the VTT by name. 
cir::GlobalOp vtt = cgm.getVTables().getAddrOfVTT(rd); return builder.createVTTAddrPoint( - loc, builder.getPointerTo(cgm.VoidPtrTy), + loc, builder.getPointerTo(cgm.voidPtrTy), mlir::FlatSymbolRefAttr::get(vtt.getSymNameAttr()), subVTTIndex); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 8723a6e..930ae55 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -55,7 +55,7 @@ cir::CallOp CIRGenFunction::emitCoroIDBuiltinCall(mlir::Location loc, if (!builtin) { fnOp = cgm.createCIRBuiltinFunction( loc, cgm.builtinCoroId, - cir::FuncType::get({int32Ty, VoidPtrTy, VoidPtrTy, VoidPtrTy}, int32Ty), + cir::FuncType::get({int32Ty, voidPtrTy, voidPtrTy, voidPtrTy}, int32Ty), /*FD=*/nullptr); assert(fnOp && "should always succeed"); } else { @@ -75,7 +75,7 @@ cir::CallOp CIRGenFunction::emitCoroAllocBuiltinCall(mlir::Location loc) { cir::FuncOp fnOp; if (!builtin) { fnOp = cgm.createCIRBuiltinFunction(loc, cgm.builtinCoroAlloc, - cir::FuncType::get({UInt32Ty}, boolTy), + cir::FuncType::get({uInt32Ty}, boolTy), /*fd=*/nullptr); assert(fnOp && "should always succeed"); } else { @@ -95,7 +95,7 @@ CIRGenFunction::emitCoroBeginBuiltinCall(mlir::Location loc, if (!builtin) { fnOp = cgm.createCIRBuiltinFunction( loc, cgm.builtinCoroBegin, - cir::FuncType::get({UInt32Ty, VoidPtrTy}, VoidPtrTy), + cir::FuncType::get({uInt32Ty, voidPtrTy}, voidPtrTy), /*fd=*/nullptr); assert(fnOp && "should always succeed"); } else { @@ -110,7 +110,7 @@ CIRGenFunction::emitCoroBeginBuiltinCall(mlir::Location loc, mlir::LogicalResult CIRGenFunction::emitCoroutineBody(const CoroutineBodyStmt &s) { mlir::Location openCurlyLoc = getLoc(s.getBeginLoc()); - cir::ConstantOp nullPtrCst = builder.getNullPtr(VoidPtrTy, openCurlyLoc); + cir::ConstantOp nullPtrCst = builder.getNullPtr(voidPtrTy, openCurlyLoc); auto fn = mlir::cast<cir::FuncOp>(curFn); fn.setCoroutine(true); diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 5667273..aeea0ef 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -80,13 +80,13 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &d, assert(!cir::MissingFeatures::openMP()); if (!didCallStackSave) { // Save the stack. - cir::PointerType defaultTy = AllocaInt8PtrTy; + cir::PointerType defaultTy = allocaInt8PtrTy; CharUnits align = CharUnits::fromQuantity( cgm.getDataLayout().getAlignment(defaultTy, false)); Address stack = createTempAlloca(defaultTy, align, loc, "saved_stack"); mlir::Value v = builder.createStackSave(loc, defaultTy); - assert(v.getType() == AllocaInt8PtrTy); + assert(v.getType() == allocaInt8PtrTy); builder.createStore(loc, v, stack); didCallStackSave = true; diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index df6ee56..5ccb431 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2529,7 +2529,7 @@ CIRGenFunction::emitConditionalBlocks(const AbstractConditionalOperator *e, // If both arms are void, so be it. if (!yieldTy) - yieldTy = VoidTy; + yieldTy = voidTy; // Insert required yields. 
for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp index 8fe0d9b4..3d3030c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp @@ -490,7 +490,7 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy, for (uint64_t i = 0; i != numInitElements; ++i) { // Advance to the next element. if (i > 0) { - one = builder.getConstantInt(loc, cgf.PtrDiffTy, i); + one = builder.getConstantInt(loc, cgf.ptrDiffTy, i); element = builder.createPtrStride(loc, begin, one); } @@ -512,7 +512,7 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy, cgf.getTypes().isZeroInitializable(elementType))) { // Advance to the start of the rest of the array. if (numInitElements) { - one = builder.getConstantInt(loc, cgf.PtrDiffTy, 1); + one = builder.getConstantInt(loc, cgf.ptrDiffTy, 1); element = cir::PtrStrideOp::create(builder, loc, cirElementPtrType, element, one); } @@ -526,7 +526,7 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy, // Compute the end of array cir::ConstantOp numArrayElementsConst = builder.getConstInt( - loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), numArrayElements); + loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), numArrayElements); mlir::Value end = cir::PtrStrideOp::create(builder, loc, cirElementPtrType, begin, numArrayElementsConst); @@ -563,7 +563,7 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy, // Advance pointer and store them to temporary variable cir::ConstantOp one = builder.getConstInt( - loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), 1); + loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), 1); auto nextElement = cir::PtrStrideOp::create( builder, loc, cirElementPtrType, currentElement, one); cgf.emitStoreThroughLValue(RValue::get(nextElement), tmpLV); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 7a35382..9dd9b6d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -257,12 +257,12 @@ static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e, if (!e->isArray()) { CharUnits typeSize = cgf.getContext().getTypeSizeInChars(type); sizeWithoutCookie = cgf.getBuilder().getConstant( - loc, cir::IntAttr::get(cgf.SizeTy, typeSize.getQuantity())); + loc, cir::IntAttr::get(cgf.sizeTy, typeSize.getQuantity())); return sizeWithoutCookie; } // The width of size_t. 
- unsigned sizeWidth = cgf.cgm.getDataLayout().getTypeSizeInBits(cgf.SizeTy); + unsigned sizeWidth = cgf.cgm.getDataLayout().getTypeSizeInBits(cgf.sizeTy); // The number of elements can be have an arbitrary integer type; // essentially, we need to multiply it by a constant factor, add a diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp index 928e5aa..6af87a0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp @@ -46,7 +46,7 @@ namespace { class ConstExprEmitter; static mlir::TypedAttr computePadding(CIRGenModule &cgm, CharUnits size) { - mlir::Type eltTy = cgm.UCharTy; + mlir::Type eltTy = cgm.uCharTy; clang::CharUnits::QuantityType arSize = size.getQuantity(); CIRGenBuilderTy &bld = cgm.getBuilder(); if (size > CharUnits::One()) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index db6878d..119314f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -762,9 +762,9 @@ public: // FIXME(cir): For now lets pretend we shouldn't use the conversion // intrinsics and insert a cast here unconditionally. src = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, src, - cgf.FloatTy); + cgf.floatTy); srcType = cgf.getContext().FloatTy; - mlirSrcType = cgf.FloatTy; + mlirSrcType = cgf.floatTy; } } @@ -1738,7 +1738,7 @@ mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) { // // See more in `EmitSub` in CGExprScalar.cpp. assert(!cir::MissingFeatures::llvmLoweringPtrDiffConsidersPointee()); - return cir::PtrDiffOp::create(builder, cgf.getLoc(ops.loc), cgf.PtrDiffTy, + return cir::PtrDiffOp::create(builder, cgf.getLoc(ops.loc), cgf.ptrDiffTy, ops.lhs, ops.rhs); } @@ -2220,7 +2220,7 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( "sizeof operator for VariableArrayType", e->getStmtClassName()); return builder.getConstant( - loc, cir::IntAttr::get(cgf.cgm.UInt64Ty, + loc, cir::IntAttr::get(cgf.cgm.uInt64Ty, llvm::APSInt(llvm::APInt(64, 1), true))); } } else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) { @@ -2228,12 +2228,12 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( e->getSourceRange(), "sizeof operator for OpenMpRequiredSimdAlign", e->getStmtClassName()); return builder.getConstant( - loc, cir::IntAttr::get(cgf.cgm.UInt64Ty, + loc, cir::IntAttr::get(cgf.cgm.uInt64Ty, llvm::APSInt(llvm::APInt(64, 1), true))); } return builder.getConstant( - loc, cir::IntAttr::get(cgf.cgm.UInt64Ty, + loc, cir::IntAttr::get(cgf.cgm.uInt64Ty, e->EvaluateKnownConstInt(cgf.getContext()))); } @@ -2329,14 +2329,14 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( mlir::Value lhs = Visit(lhsExpr); if (!lhs) { - lhs = builder.getNullValue(cgf.VoidTy, loc); + lhs = builder.getNullValue(cgf.voidTy, loc); lhsIsVoid = true; } mlir::Value rhs = Visit(rhsExpr); if (lhsIsVoid) { assert(!rhs && "lhs and rhs types must match"); - rhs = builder.getNullValue(cgf.VoidTy, loc); + rhs = builder.getNullValue(cgf.voidTy, loc); } return builder.createSelect(loc, condV, lhs, rhs); @@ -2381,7 +2381,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( if (!insertPoints.empty()) { // If both arms are void, so be it. if (!yieldTy) - yieldTy = cgf.VoidTy; + yieldTy = cgf.voidTy; // Insert required yields. 
for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 58feb36..71ff20a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1008,7 +1008,7 @@ CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType, if (isa<VariableArrayType>(arrayType)) { assert(cir::MissingFeatures::vlas()); cgm.errorNYI(*currSrcLoc, "VLAs"); - return builder.getConstInt(*currSrcLoc, SizeTy, 0); + return builder.getConstInt(*currSrcLoc, sizeTy, 0); } uint64_t countFromCLAs = 1; @@ -1037,7 +1037,7 @@ CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType, } baseType = eltType; - return builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs); + return builder.getConstInt(*currSrcLoc, sizeTy, countFromCLAs); } mlir::Value CIRGenFunction::emitAlignmentAssumption( @@ -1074,7 +1074,7 @@ CIRGenFunction::getVLASize(const VariableArrayType *type) { elementType = type->getElementType(); mlir::Value vlaSize = vlaSizeMap[type->getSizeExpr()]; assert(vlaSize && "no size for VLA!"); - assert(vlaSize.getType() == SizeTy); + assert(vlaSize.getType() == sizeTy); if (!numElements) { numElements = vlaSize; @@ -1188,7 +1188,7 @@ void CIRGenFunction::emitVariablyModifiedType(QualType type) { // Always zexting here would be wrong if it weren't // undefined behavior to have a negative bound. // FIXME: What about when size's type is larger than size_t? - entry = builder.createIntCast(size, SizeTy); + entry = builder.createIntCast(size, sizeTy); } } type = vat->getElementType(); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 88fedf1..f603f5ec 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1846,13 +1846,13 @@ mlir::Value CIRGenItaniumCXXABI::getVirtualBaseClassOffset( const CXXRecordDecl *classDecl, const CXXRecordDecl *baseClassDecl) { CIRGenBuilderTy &builder = cgf.getBuilder(); mlir::Value vtablePtr = cgf.getVTablePtr(loc, thisAddr, classDecl); - mlir::Value vtableBytePtr = builder.createBitcast(vtablePtr, cgm.UInt8PtrTy); + mlir::Value vtableBytePtr = builder.createBitcast(vtablePtr, cgm.uInt8PtrTy); CharUnits vbaseOffsetOffset = cgm.getItaniumVTableContext().getVirtualBaseOffsetOffset(classDecl, baseClassDecl); mlir::Value offsetVal = builder.getSInt64(vbaseOffsetOffset.getQuantity(), loc); - auto vbaseOffsetPtr = cir::PtrStrideOp::create(builder, loc, cgm.UInt8PtrTy, + auto vbaseOffsetPtr = cir::PtrStrideOp::create(builder, loc, cgm.uInt8PtrTy, vtableBytePtr, offsetVal); mlir::Value vbaseOffset; @@ -1861,9 +1861,9 @@ mlir::Value CIRGenItaniumCXXABI::getVirtualBaseClassOffset( cgm.errorNYI(loc, "getVirtualBaseClassOffset: relative layout"); } else { mlir::Value offsetPtr = builder.createBitcast( - vbaseOffsetPtr, builder.getPointerTo(cgm.PtrDiffTy)); + vbaseOffsetPtr, builder.getPointerTo(cgm.ptrDiffTy)); vbaseOffset = builder.createLoad( - loc, Address(offsetPtr, cgm.PtrDiffTy, cgf.getPointerAlign())); + loc, Address(offsetPtr, cgm.ptrDiffTy, cgf.getPointerAlign())); } return vbaseOffset; } @@ -2244,7 +2244,7 @@ Address CIRGenItaniumCXXABI::initializeArrayCookie(CIRGenFunction &cgf, // Write the number of elements into the appropriate slot. 
Address numElementsPtr = - cookiePtr.withElementType(cgf.getBuilder(), cgf.SizeTy); + cookiePtr.withElementType(cgf.getBuilder(), cgf.sizeTy); cgf.getBuilder().createStore(loc, numElements, numElementsPtr); // Finally, compute a pointer to the actual data buffer by skipping diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 46adfe2..9f9b2db 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -67,28 +67,28 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext, abi(createCXXABI(*this)), genTypes(*this), vtables(*this) { // Initialize cached types - VoidTy = cir::VoidType::get(&getMLIRContext()); - VoidPtrTy = cir::PointerType::get(VoidTy); - SInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/true); - SInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/true); - SInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/true); - SInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/true); - SInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/true); - UInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/false); - UInt8PtrTy = cir::PointerType::get(UInt8Ty); + voidTy = cir::VoidType::get(&getMLIRContext()); + voidPtrTy = cir::PointerType::get(voidTy); + sInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/true); + sInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/true); + sInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/true); + sInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/true); + sInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/true); + uInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/false); + uInt8PtrTy = cir::PointerType::get(uInt8Ty); cirAllocaAddressSpace = getTargetCIRGenInfo().getCIRAllocaAddressSpace(); - UInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/false); - UInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/false); - UInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/false); - UInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/false); - FP16Ty = cir::FP16Type::get(&getMLIRContext()); - BFloat16Ty = cir::BF16Type::get(&getMLIRContext()); - FloatTy = cir::SingleType::get(&getMLIRContext()); - DoubleTy = cir::DoubleType::get(&getMLIRContext()); - FP80Ty = cir::FP80Type::get(&getMLIRContext()); - FP128Ty = cir::FP128Type::get(&getMLIRContext()); - - AllocaInt8PtrTy = cir::PointerType::get(UInt8Ty, cirAllocaAddressSpace); + uInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/false); + uInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/false); + uInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/false); + uInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/false); + fP16Ty = cir::FP16Type::get(&getMLIRContext()); + bFloat16Ty = cir::BF16Type::get(&getMLIRContext()); + floatTy = cir::SingleType::get(&getMLIRContext()); + doubleTy = cir::DoubleType::get(&getMLIRContext()); + fP80Ty = cir::FP80Type::get(&getMLIRContext()); + fP128Ty = cir::FP128Type::get(&getMLIRContext()); + + allocaInt8PtrTy = cir::PointerType::get(uInt8Ty, cirAllocaAddressSpace); PointerAlignInBytes = astContext @@ -97,16 +97,16 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext, .getQuantity(); const unsigned charSize = astContext.getTargetInfo().getCharWidth(); - UCharTy = cir::IntType::get(&getMLIRContext(), charSize, 
/*isSigned=*/false); + uCharTy = cir::IntType::get(&getMLIRContext(), charSize, /*isSigned=*/false); // TODO(CIR): Should be updated once TypeSizeInfoAttr is upstreamed const unsigned sizeTypeSize = astContext.getTypeSize(astContext.getSignedSizeType()); SizeSizeInBytes = astContext.toCharUnitsFromBits(sizeTypeSize).getQuantity(); // In CIRGenTypeCache, UIntPtrTy and SizeType are fields of the same union - UIntPtrTy = + uIntPtrTy = cir::IntType::get(&getMLIRContext(), sizeTypeSize, /*isSigned=*/false); - PtrDiffTy = + ptrDiffTy = cir::IntType::get(&getMLIRContext(), sizeTypeSize, /*isSigned=*/true); std::optional<cir::SourceLanguage> sourceLanguage = getCIRSourceLanguage(); diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp index be063033..890f8a6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp @@ -617,11 +617,11 @@ void OpenACCRecipeBuilderBase::createReductionRecipeCombiner( if (const auto *cat = cgf.getContext().getAsConstantArrayType(origType)) { // If we're in an array, we have to emit the combiner for each element of // the array. - auto itrTy = mlir::cast<cir::IntType>(cgf.PtrDiffTy); + auto itrTy = mlir::cast<cir::IntType>(cgf.ptrDiffTy); auto itrPtrTy = cir::PointerType::get(itrTy); mlir::Value zero = - builder.getConstInt(loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), 0); + builder.getConstInt(loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), 0); mlir::Value itr = cir::AllocaOp::create(builder, loc, itrPtrTy, itrTy, "itr", cgf.cgm.getSize(cgf.getPointerAlign())); @@ -633,7 +633,7 @@ void OpenACCRecipeBuilderBase::createReductionRecipeCombiner( [&](mlir::OpBuilder &b, mlir::Location loc) { auto loadItr = cir::LoadOp::create(builder, loc, {itr}); mlir::Value arraySize = builder.getConstInt( - loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), cat->getZExtSize()); + loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), cat->getZExtSize()); auto cmp = builder.createCompare(loc, cir::CmpOpKind::lt, loadItr, arraySize); builder.createCondition(cmp); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index ff5842c..0f63e91 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -26,47 +26,47 @@ struct CIRGenTypeCache { CIRGenTypeCache() {} // ClangIR void type - cir::VoidType VoidTy; + cir::VoidType voidTy; // ClangIR signed integral types of common sizes - cir::IntType SInt8Ty; - cir::IntType SInt16Ty; - cir::IntType SInt32Ty; - cir::IntType SInt64Ty; - cir::IntType SInt128Ty; + cir::IntType sInt8Ty; + cir::IntType sInt16Ty; + cir::IntType sInt32Ty; + cir::IntType sInt64Ty; + cir::IntType sInt128Ty; // ClangIR unsigned integral type of common sizes - cir::IntType UInt8Ty; - cir::IntType UInt16Ty; - cir::IntType UInt32Ty; - cir::IntType UInt64Ty; - cir::IntType UInt128Ty; + cir::IntType uInt8Ty; + cir::IntType uInt16Ty; + cir::IntType uInt32Ty; + cir::IntType uInt64Ty; + cir::IntType uInt128Ty; // ClangIR floating-point types with fixed formats - cir::FP16Type FP16Ty; - cir::BF16Type BFloat16Ty; - cir::SingleType FloatTy; - cir::DoubleType DoubleTy; - cir::FP80Type FP80Ty; - cir::FP128Type FP128Ty; + cir::FP16Type fP16Ty; + cir::BF16Type bFloat16Ty; + cir::SingleType floatTy; + cir::DoubleType doubleTy; + cir::FP80Type fP80Ty; + cir::FP128Type fP128Ty; /// ClangIR char - mlir::Type UCharTy; + mlir::Type uCharTy; /// intptr_t, size_t, and ptrdiff_t, which we assume are the same size. 
union { - mlir::Type UIntPtrTy; - mlir::Type SizeTy; + mlir::Type uIntPtrTy; + mlir::Type sizeTy; }; - mlir::Type PtrDiffTy; + mlir::Type ptrDiffTy; /// void* in address space 0 - cir::PointerType VoidPtrTy; - cir::PointerType UInt8PtrTy; + cir::PointerType voidPtrTy; + cir::PointerType uInt8PtrTy; /// void* in alloca address space - cir::PointerType AllocaInt8PtrTy; + cir::PointerType allocaInt8PtrTy; /// The size and alignment of a pointer into the generic address space. union { diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index d1b91d0..03618d4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -71,7 +71,7 @@ mlir::Type CIRGenTypes::convertFunctionTypeInternal(QualType qft) { if (!isFuncTypeConvertible(ft)) { cgm.errorNYI(SourceLocation(), "function type involving an incomplete type", qft); - return cir::FuncType::get(SmallVector<mlir::Type, 1>{}, cgm.VoidTy); + return cir::FuncType::get(SmallVector<mlir::Type, 1>{}, cgm.voidTy); } const CIRGenFunctionInfo *fi; @@ -298,7 +298,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) { switch (cast<BuiltinType>(ty)->getKind()) { // void case BuiltinType::Void: - resultType = cgm.VoidTy; + resultType = cgm.voidTy; break; // bool @@ -338,42 +338,42 @@ mlir::Type CIRGenTypes::convertType(QualType type) { // Floating-point types case BuiltinType::Float16: - resultType = cgm.FP16Ty; + resultType = cgm.fP16Ty; break; case BuiltinType::Half: if (astContext.getLangOpts().NativeHalfType || !astContext.getTargetInfo().useFP16ConversionIntrinsics()) { - resultType = cgm.FP16Ty; + resultType = cgm.fP16Ty; } else { cgm.errorNYI(SourceLocation(), "processing of built-in type", type); - resultType = cgm.SInt32Ty; + resultType = cgm.sInt32Ty; } break; case BuiltinType::BFloat16: - resultType = cgm.BFloat16Ty; + resultType = cgm.bFloat16Ty; break; case BuiltinType::Float: assert(&astContext.getFloatTypeSemantics(type) == &llvm::APFloat::IEEEsingle() && "ClangIR NYI: 'float' in a format other than IEEE 32-bit"); - resultType = cgm.FloatTy; + resultType = cgm.floatTy; break; case BuiltinType::Double: assert(&astContext.getFloatTypeSemantics(type) == &llvm::APFloat::IEEEdouble() && "ClangIR NYI: 'double' in a format other than IEEE 64-bit"); - resultType = cgm.DoubleTy; + resultType = cgm.doubleTy; break; case BuiltinType::LongDouble: resultType = builder.getLongDoubleTy(astContext.getFloatTypeSemantics(type)); break; case BuiltinType::Float128: - resultType = cgm.FP128Ty; + resultType = cgm.fP128Ty; break; case BuiltinType::Ibm128: cgm.errorNYI(SourceLocation(), "processing of built-in type", type); - resultType = cgm.SInt32Ty; + resultType = cgm.sInt32Ty; break; case BuiltinType::NullPtr: @@ -386,7 +386,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) { default: cgm.errorNYI(SourceLocation(), "processing of built-in type", type); - resultType = cgm.SInt32Ty; + resultType = cgm.sInt32Ty; break; } break; @@ -439,7 +439,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) { // int X[] -> [0 x int], unless the element type is not sized. If it is // unsized (e.g. an incomplete record) just use [0 x i8]. 
if (!cir::isSized(elemTy)) { - elemTy = cgm.SInt8Ty; + elemTy = cgm.sInt8Ty; } resultType = cir::ArrayType::get(elemTy, 0); @@ -454,7 +454,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) { // i8 just to have a concrete type" if (!cir::isSized(elemTy)) { cgm.errorNYI(SourceLocation(), "arrays of undefined struct type", type); - resultType = cgm.UInt32Ty; + resultType = cgm.uInt32Ty; break; } @@ -477,7 +477,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) { // Return a placeholder 'i32' type. This can be changed later when the // type is defined (see UpdateCompletedType), but is likely to be the // "right" answer. - resultType = cgm.UInt32Ty; + resultType = cgm.uInt32Ty; break; } @@ -490,7 +490,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) { const auto *bitIntTy = cast<BitIntType>(type); if (bitIntTy->getNumBits() > cir::IntType::maxBitwidth()) { cgm.errorNYI(SourceLocation(), "large _BitInt type", type); - resultType = cgm.SInt32Ty; + resultType = cgm.sInt32Ty; } else { resultType = cir::IntType::get(&getMLIRContext(), bitIntTy->getNumBits(), bitIntTy->isSigned()); @@ -515,7 +515,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) { default: cgm.errorNYI(SourceLocation(), "processing of type", type->getTypeClassName()); - resultType = cgm.SInt32Ty; + resultType = cgm.sInt32Ty; break; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 2d2ef42..7ba03ce 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -286,14 +286,14 @@ void cir::ConditionOp::getSuccessorRegions( // Parent is a loop: condition may branch to the body or to the parent op. if (auto loopOp = dyn_cast<LoopOpInterface>(getOperation()->getParentOp())) { regions.emplace_back(&loopOp.getBody(), loopOp.getBody().getArguments()); - regions.emplace_back(loopOp->getResults()); + regions.emplace_back(getOperation(), loopOp->getResults()); } assert(!cir::MissingFeatures::awaitOp()); } MutableOperandRange -cir::ConditionOp::getMutableSuccessorOperands(RegionBranchPoint point) { +cir::ConditionOp::getMutableSuccessorOperands(RegionSuccessor point) { // No values are yielded to the successor region. return MutableOperandRange(getOperation(), 0, 0); } @@ -989,7 +989,8 @@ void cir::IfOp::getSuccessorRegions(mlir::RegionBranchPoint point, SmallVectorImpl<RegionSuccessor> ®ions) { // The `then` and the `else` region branch back to the parent operation. if (!point.isParent()) { - regions.push_back(RegionSuccessor()); + regions.push_back( + RegionSuccessor(getOperation(), getOperation()->getResults())); return; } @@ -1039,7 +1040,7 @@ void cir::ScopeOp::getSuccessorRegions( mlir::RegionBranchPoint point, SmallVectorImpl<RegionSuccessor> ®ions) { // The only region always branch back to the parent operation. 
if (!point.isParent()) { - regions.push_back(RegionSuccessor(getODSResults(0))); + regions.push_back(RegionSuccessor(getOperation(), getODSResults(0))); return; } @@ -1124,7 +1125,8 @@ Block *cir::BrCondOp::getSuccessorForOperands(ArrayRef<Attribute> operands) { void cir::CaseOp::getSuccessorRegions( mlir::RegionBranchPoint point, SmallVectorImpl<RegionSuccessor> ®ions) { if (!point.isParent()) { - regions.push_back(RegionSuccessor()); + regions.push_back( + RegionSuccessor(getOperation(), getOperation()->getResults())); return; } regions.push_back(RegionSuccessor(&getCaseRegion())); @@ -1188,7 +1190,8 @@ static void printSwitchOp(OpAsmPrinter &p, cir::SwitchOp op, void cir::SwitchOp::getSuccessorRegions( mlir::RegionBranchPoint point, SmallVectorImpl<RegionSuccessor> ®ion) { if (!point.isParent()) { - region.push_back(RegionSuccessor()); + region.push_back( + RegionSuccessor(getOperation(), getOperation()->getResults())); return; } @@ -1402,7 +1405,8 @@ void cir::GlobalOp::getSuccessorRegions( mlir::RegionBranchPoint point, SmallVectorImpl<RegionSuccessor> ®ions) { // The `ctor` and `dtor` regions always branch back to the parent operation. if (!point.isParent()) { - regions.push_back(RegionSuccessor()); + regions.push_back( + RegionSuccessor(getOperation(), getOperation()->getResults())); return; } @@ -1961,7 +1965,7 @@ void cir::TernaryOp::getSuccessorRegions( mlir::RegionBranchPoint point, SmallVectorImpl<RegionSuccessor> ®ions) { // The `true` and the `false` region branch back to the parent operation. if (!point.isParent()) { - regions.push_back(RegionSuccessor(this->getODSResults(0))); + regions.push_back(RegionSuccessor(getOperation(), this->getODSResults(0))); return; } @@ -2978,7 +2982,8 @@ void cir::TryOp::getSuccessorRegions( llvm::SmallVectorImpl<mlir::RegionSuccessor> ®ions) { // The `try` and the `catchers` region branch back to the parent operation. if (!point.isParent()) { - regions.push_back(mlir::RegionSuccessor()); + regions.push_back( + RegionSuccessor(getOperation(), getOperation()->getResults())); return; } diff --git a/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp b/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp index 0ce5017..6de51f12 100644 --- a/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp +++ b/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp @@ -17,7 +17,7 @@ namespace cir { void LoopOpInterface::getLoopOpSuccessorRegions( LoopOpInterface op, mlir::RegionBranchPoint point, llvm::SmallVectorImpl<mlir::RegionSuccessor> ®ions) { - assert(point.isParent() || point.getRegionOrNull()); + assert(point.isParent() || point.getTerminatorPredecessorOrNull()); // Branching to first region: go to condition or body (do-while). if (point.isParent()) { @@ -25,15 +25,18 @@ void LoopOpInterface::getLoopOpSuccessorRegions( return; } + mlir::Region *parentRegion = + point.getTerminatorPredecessorOrNull()->getParentRegion(); + // Branching from condition: go to body or exit. - if (&op.getCond() == point.getRegionOrNull()) { - regions.emplace_back(mlir::RegionSuccessor(op->getResults())); + if (&op.getCond() == parentRegion) { + regions.emplace_back(mlir::RegionSuccessor(op, op->getResults())); regions.emplace_back(&op.getBody(), op.getBody().getArguments()); return; } // Branching from body: go to step (for) or condition. - if (&op.getBody() == point.getRegionOrNull()) { + if (&op.getBody() == parentRegion) { // FIXME(cir): Should we consider break/continue statements here? mlir::Region *afterBody = (op.maybeGetStep() ? 
op.maybeGetStep() : &op.getCond()); @@ -42,7 +45,7 @@ void LoopOpInterface::getLoopOpSuccessorRegions( } // Branching from step: go to condition. - if (op.maybeGetStep() == point.getRegionOrNull()) { + if (op.maybeGetStep() == parentRegion) { regions.emplace_back(&op.getCond(), op.getCond().getArguments()); return; } diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index 301d577..01f2161 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -2297,9 +2297,13 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr, CGM.getABIInfo().getOptimalVectorMemoryType(VecTy, getLangOpts()); if (!ClangVecTy->isPackedVectorBoolType(getContext()) && VecTy != NewVecTy) { - SmallVector<int, 16> Mask(NewVecTy->getNumElements(), -1); + SmallVector<int, 16> Mask(NewVecTy->getNumElements(), + VecTy->getNumElements()); std::iota(Mask.begin(), Mask.begin() + VecTy->getNumElements(), 0); - Value = Builder.CreateShuffleVector(Value, Mask, "extractVec"); + // Use undef instead of poison for the padding lanes, to make sure no + // padding bits are poisoned, which may break coercion. + Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy), + Mask, "extractVec"); SrcTy = NewVecTy; } if (Addr.getElementType() != SrcTy) diff --git a/clang/lib/CodeGen/CGHLSLBuiltins.cpp b/clang/lib/CodeGen/CGHLSLBuiltins.cpp index 384bd59..fbf4a57 100644 --- a/clang/lib/CodeGen/CGHLSLBuiltins.cpp +++ b/clang/lib/CodeGen/CGHLSLBuiltins.cpp @@ -206,7 +206,7 @@ static Intrinsic::ID getWaveActiveSumIntrinsic(llvm::Triple::ArchType Arch, } } -// Return wave active sum that corresponds to the QT scalar type +// Return wave active max that corresponds to the QT scalar type static Intrinsic::ID getWaveActiveMaxIntrinsic(llvm::Triple::ArchType Arch, CGHLSLRuntime &RT, QualType QT) { switch (Arch) { @@ -225,6 +225,25 @@ static Intrinsic::ID getWaveActiveMaxIntrinsic(llvm::Triple::ArchType Arch, } } +// Return wave active min that corresponds to the QT scalar type +static Intrinsic::ID getWaveActiveMinIntrinsic(llvm::Triple::ArchType Arch, + CGHLSLRuntime &RT, QualType QT) { + switch (Arch) { + case llvm::Triple::spirv: + if (QT->isUnsignedIntegerType()) + return Intrinsic::spv_wave_reduce_umin; + return Intrinsic::spv_wave_reduce_min; + case llvm::Triple::dxil: { + if (QT->isUnsignedIntegerType()) + return Intrinsic::dx_wave_reduce_umin; + return Intrinsic::dx_wave_reduce_min; + } + default: + llvm_unreachable("Intrinsic WaveActiveMin" + " not supported by target architecture"); + } +} + // Returns the mangled name for a builtin function that the SPIR-V backend // will expand into a spec Constant. static std::string getSpecConstantFunctionName(clang::QualType SpecConstantType, @@ -742,6 +761,17 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID, &CGM.getModule(), IID, {OpExpr->getType()}), ArrayRef{OpExpr}, "hlsl.wave.active.max"); } + case Builtin::BI__builtin_hlsl_wave_active_min: { + // Due to the use of variadic arguments, explicitly retrieve the argument + Value *OpExpr = EmitScalarExpr(E->getArg(0)); + Intrinsic::ID IID = getWaveActiveMinIntrinsic( + getTarget().getTriple().getArch(), CGM.getHLSLRuntime(), + E->getArg(0)->getType()); + + return EmitRuntimeCall(Intrinsic::getOrInsertDeclaration( + &CGM.getModule(), IID, {OpExpr->getType()}), + ArrayRef{OpExpr}, "hlsl.wave.active.min"); + } case Builtin::BI__builtin_hlsl_wave_get_lane_index: { // We don't define a SPIR-V intrinsic, instead it is a SPIR-V built-in // defined in SPIRVBuiltins.td.
So instead we manually get the matching name diff --git a/clang/lib/Driver/ToolChains/Fuchsia.cpp b/clang/lib/Driver/ToolChains/Fuchsia.cpp index 31c2f3f..507cc03 100644 --- a/clang/lib/Driver/ToolChains/Fuchsia.cpp +++ b/clang/lib/Driver/ToolChains/Fuchsia.cpp @@ -483,7 +483,8 @@ SanitizerMask Fuchsia::getSupportedSanitizers() const { Res |= SanitizerKind::Leak; Res |= SanitizerKind::Scudo; Res |= SanitizerKind::Thread; - if (getTriple().getArch() == llvm::Triple::x86_64) { + if (getTriple().getArch() == llvm::Triple::x86_64 || + getTriple().getArch() == llvm::Triple::x86) { Res |= SanitizerKind::SafeStack; } return Res; @@ -496,6 +497,7 @@ SanitizerMask Fuchsia::getDefaultSanitizers() const { case llvm::Triple::riscv64: Res |= SanitizerKind::ShadowCallStack; break; + case llvm::Triple::x86: case llvm::Triple::x86_64: Res |= SanitizerKind::SafeStack; break; diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp index 1d0dfd0b..021d8c6 100644 --- a/clang/lib/Format/TokenAnnotator.cpp +++ b/clang/lib/Format/TokenAnnotator.cpp @@ -2674,8 +2674,11 @@ private: } // *a or &a or &&a. - if (PreviousNotConst->is(TT_PointerOrReference)) + if (PreviousNotConst->is(TT_PointerOrReference) || + PreviousNotConst->endsSequence(tok::coloncolon, + TT_PointerOrReference)) { return true; + } // MyClass a; if (PreviousNotConst->isTypeName(LangOpts)) diff --git a/clang/lib/Frontend/TextDiagnostic.cpp b/clang/lib/Frontend/TextDiagnostic.cpp index 5888571..f5add2a 100644 --- a/clang/lib/Frontend/TextDiagnostic.cpp +++ b/clang/lib/Frontend/TextDiagnostic.cpp @@ -17,7 +17,6 @@ #include "llvm/Support/ConvertUTF.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Locale.h" -#include "llvm/Support/raw_ostream.h" #include <algorithm> #include <optional> @@ -662,7 +661,7 @@ void TextDiagnostic::emitDiagnosticMessage( FullSourceLoc Loc, PresumedLoc PLoc, DiagnosticsEngine::Level Level, StringRef Message, ArrayRef<clang::CharSourceRange> Ranges, DiagOrStoredDiag D) { - uint64_t StartOfLocationInfo = OS.tell(); + uint64_t StartOfLocationInfo = OS.getColumn(); // Emit the location of this particular diagnostic. if (Loc.isValid()) @@ -675,8 +674,11 @@ void TextDiagnostic::emitDiagnosticMessage( printDiagnosticLevel(OS, Level, DiagOpts.ShowColors); printDiagnosticMessage(OS, /*IsSupplemental*/ Level == DiagnosticsEngine::Note, - Message, OS.tell() - StartOfLocationInfo, + Message, OS.getColumn() - StartOfLocationInfo, DiagOpts.MessageLength, DiagOpts.ShowColors); + // We use a formatted ostream, which does its own buffering. Flush here + // so we keep the proper order of output. 
+ OS.flush(); } /*static*/ void @@ -1485,7 +1487,7 @@ void TextDiagnostic::emitSnippet(StringRef SourceLine, if (CharStyle != Styles.end()) { if (!CurrentColor || (CurrentColor && *CurrentColor != CharStyle->Color)) { - OS.changeColor(CharStyle->Color, false); + OS.changeColor(CharStyle->Color); CurrentColor = CharStyle->Color; } } else if (CurrentColor) { diff --git a/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h index d973371..a918af3 100644 --- a/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h +++ b/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h @@ -2598,6 +2598,129 @@ _HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_max) __attribute__((convergent)) double4 WaveActiveMax(double4); //===----------------------------------------------------------------------===// +// WaveActiveMin builtins +//===----------------------------------------------------------------------===// + +_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) half WaveActiveMin(half); +_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) half2 WaveActiveMin(half2); +_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) half3 WaveActiveMin(half3); +_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) half4 WaveActiveMin(half4); + +#ifdef __HLSL_ENABLE_16_BIT +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) int16_t WaveActiveMin(int16_t); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) int16_t2 WaveActiveMin(int16_t2); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) int16_t3 WaveActiveMin(int16_t3); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) int16_t4 WaveActiveMin(int16_t4); + +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) uint16_t WaveActiveMin(uint16_t); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) uint16_t2 WaveActiveMin(uint16_t2); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) uint16_t3 WaveActiveMin(uint16_t3); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) uint16_t4 WaveActiveMin(uint16_t4); +#endif + +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) int WaveActiveMin(int); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) int2 WaveActiveMin(int2); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) int3 WaveActiveMin(int3); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) int4 WaveActiveMin(int4); + +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) uint WaveActiveMin(uint); 
+_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) uint2 WaveActiveMin(uint2); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) uint3 WaveActiveMin(uint3); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) uint4 WaveActiveMin(uint4); + +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) int64_t WaveActiveMin(int64_t); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) int64_t2 WaveActiveMin(int64_t2); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) int64_t3 WaveActiveMin(int64_t3); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) int64_t4 WaveActiveMin(int64_t4); + +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) uint64_t WaveActiveMin(uint64_t); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) uint64_t2 WaveActiveMin(uint64_t2); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) uint64_t3 WaveActiveMin(uint64_t3); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) uint64_t4 WaveActiveMin(uint64_t4); + +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) float WaveActiveMin(float); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) float2 WaveActiveMin(float2); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) float3 WaveActiveMin(float3); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) float4 WaveActiveMin(float4); + +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) double WaveActiveMin(double); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) double2 WaveActiveMin(double2); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) double3 WaveActiveMin(double3); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min) +__attribute__((convergent)) double4 WaveActiveMin(double4); + +//===----------------------------------------------------------------------===// // WaveActiveSum builtins //===----------------------------------------------------------------------===// diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp index 96d5142..94a490a 100644 --- a/clang/lib/Sema/SemaHLSL.cpp +++ b/clang/lib/Sema/SemaHLSL.cpp @@ -3279,6 +3279,7 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { break; } case Builtin::BI__builtin_hlsl_wave_active_max: + case Builtin::BI__builtin_hlsl_wave_active_min: case Builtin::BI__builtin_hlsl_wave_active_sum: { if (SemaRef.checkArgCount(TheCall, 1)) 
return true; diff --git a/clang/test/AST/ByteCode/builtin-functions.cpp b/clang/test/AST/ByteCode/builtin-functions.cpp index e9093b2..a90f636 100644 --- a/clang/test/AST/ByteCode/builtin-functions.cpp +++ b/clang/test/AST/ByteCode/builtin-functions.cpp @@ -1856,7 +1856,8 @@ namespace InitParam { #endif -namespace SAddOverflowInt { +namespace NonBlockPointerStore { int a; void foo(void) { a *= __builtin_sadd_overflow(1, 2, 0); } + void foo2(void) { a *= __builtin_addc(1, 2, 0, 0); } } diff --git a/clang/test/CodeGen/AArch64/ext-vector-coercion.c b/clang/test/CodeGen/AArch64/ext-vector-coercion.c new file mode 100644 index 0000000..354980a --- /dev/null +++ b/clang/test/CodeGen/AArch64/ext-vector-coercion.c @@ -0,0 +1,42 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6 +// RUN: %clang_cc1 -fenable-matrix -triple arm64-apple-macosx %s -emit-llvm -disable-llvm-passes -o - | FileCheck %s + +typedef float float3 __attribute__((ext_vector_type(3))); +struct Vec3 { + union { + struct { + float x; + float y; + float z; + }; + float vec __attribute__((ext_vector_type(3))); + }; +}; + +// CHECK-LABEL: define i128 @add( +// CHECK-SAME: i128 [[A_COERCE:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_VEC3:%.*]], align 16 +// CHECK-NEXT: [[A:%.*]] = alloca [[STRUCT_VEC3]], align 16 +// CHECK-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_VEC3]], ptr [[A]], i32 0, i32 0 +// CHECK-NEXT: store i128 [[A_COERCE]], ptr [[COERCE_DIVE]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT_VEC3]], ptr [[A]], i32 0, i32 0 +// CHECK-NEXT: [[LOADVECN:%.*]] = load <4 x float>, ptr [[TMP0]], align 16 +// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <4 x float> [[LOADVECN]], <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2> +// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT_VEC3]], ptr [[A]], i32 0, i32 0 +// CHECK-NEXT: [[LOADVECN1:%.*]] = load <4 x float>, ptr [[TMP1]], align 16 +// CHECK-NEXT: [[EXTRACTVEC2:%.*]] = shufflevector <4 x float> [[LOADVECN1]], <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2> +// CHECK-NEXT: [[ADD:%.*]] = fadd <3 x float> [[EXTRACTVEC]], [[EXTRACTVEC2]] +// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT_VEC3]], ptr [[RETVAL]], i32 0, i32 0 +// CHECK-NEXT: [[EXTRACTVEC3:%.*]] = shufflevector <3 x float> [[ADD]], <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +// CHECK-NEXT: store <4 x float> [[EXTRACTVEC3]], ptr [[TMP2]], align 16 +// CHECK-NEXT: [[COERCE_DIVE4:%.*]] = getelementptr inbounds nuw [[STRUCT_VEC3]], ptr [[RETVAL]], i32 0, i32 0 +// CHECK-NEXT: [[TMP3:%.*]] = load i128, ptr [[COERCE_DIVE4]], align 16 +// CHECK-NEXT: ret i128 [[TMP3]] +// +struct Vec3 add(struct Vec3 a) { + struct Vec3 res; + res.vec = a.vec + a.vec; + return res; +} + diff --git a/clang/test/CodeGenCXX/matrix-vector-bit-int.cpp b/clang/test/CodeGenCXX/matrix-vector-bit-int.cpp index 2e7531b..4be1cb3 100644 --- a/clang/test/CodeGenCXX/matrix-vector-bit-int.cpp +++ b/clang/test/CodeGenCXX/matrix-vector-bit-int.cpp @@ -19,7 +19,7 @@ using i4x3x3 = _BitInt(4) __attribute__((matrix_type(3, 3))); // CHECK-NEXT: store i32 [[A_COERCE]], ptr [[A]], align 4 // CHECK-NEXT: [[LOADVECN:%.*]] = load <4 x i8>, ptr [[A]], align 4 // CHECK-NEXT: [[A1:%.*]] = shufflevector <4 x i8> [[LOADVECN]], <4 x i8> poison, <3 x i32> <i32 0, i32 1, i32 2> -// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <3 x i8> [[A1]], <3 x i8> poison, 
<4 x i32> <i32 0, i32 1, i32 2, i32 poison> +// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <3 x i8> [[A1]], <3 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> // CHECK-NEXT: store <4 x i8> [[EXTRACTVEC]], ptr [[A_ADDR]], align 4 // CHECK-NEXT: [[LOADVECN2:%.*]] = load <4 x i8>, ptr [[A_ADDR]], align 4 // CHECK-NEXT: [[EXTRACTVEC3:%.*]] = shufflevector <4 x i8> [[LOADVECN2]], <4 x i8> poison, <3 x i32> <i32 0, i32 1, i32 2> @@ -38,7 +38,7 @@ i8x3 v1(i8x3 a) { // CHECK-SAME: <3 x i32> noundef [[A:%.*]]) #[[ATTR1:[0-9]+]] { // CHECK-NEXT: [[ENTRY:.*:]] // CHECK-NEXT: [[A_ADDR:%.*]] = alloca <3 x i32>, align 16 -// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <3 x i32> [[A]], <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison> +// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <3 x i32> [[A]], <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> // CHECK-NEXT: store <4 x i32> [[EXTRACTVEC]], ptr [[A_ADDR]], align 16 // CHECK-NEXT: [[LOADVECN:%.*]] = load <4 x i32>, ptr [[A_ADDR]], align 16 // CHECK-NEXT: [[EXTRACTVEC1:%.*]] = shufflevector <4 x i32> [[LOADVECN]], <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2> @@ -57,7 +57,7 @@ i32x3 v2(i32x3 a) { // CHECK-NEXT: [[A_ADDR:%.*]] = alloca <3 x i512>, align 256 // CHECK-NEXT: [[LOADVECN:%.*]] = load <4 x i512>, ptr [[TMP0]], align 256 // CHECK-NEXT: [[A:%.*]] = shufflevector <4 x i512> [[LOADVECN]], <4 x i512> poison, <3 x i32> <i32 0, i32 1, i32 2> -// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <3 x i512> [[A]], <3 x i512> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison> +// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <3 x i512> [[A]], <3 x i512> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> // CHECK-NEXT: store <4 x i512> [[EXTRACTVEC]], ptr [[A_ADDR]], align 256 // CHECK-NEXT: [[LOADVECN1:%.*]] = load <4 x i512>, ptr [[A_ADDR]], align 256 // CHECK-NEXT: [[EXTRACTVEC2:%.*]] = shufflevector <4 x i512> [[LOADVECN1]], <4 x i512> poison, <3 x i32> <i32 0, i32 1, i32 2> @@ -80,7 +80,7 @@ i512x3 v3(i512x3 a) { // CHECK-NEXT: store i32 [[A_COERCE]], ptr [[A]], align 4 // CHECK-NEXT: [[LOADVECN:%.*]] = load <4 x i4>, ptr [[A]], align 4 // CHECK-NEXT: [[A1:%.*]] = shufflevector <4 x i4> [[LOADVECN]], <4 x i4> poison, <3 x i32> <i32 0, i32 1, i32 2> -// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <3 x i4> [[A1]], <3 x i4> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison> +// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <3 x i4> [[A1]], <3 x i4> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> // CHECK-NEXT: store <4 x i4> [[EXTRACTVEC]], ptr [[A_ADDR]], align 4 // CHECK-NEXT: [[LOADVECN2:%.*]] = load <4 x i4>, ptr [[A_ADDR]], align 4 // CHECK-NEXT: [[EXTRACTVEC3:%.*]] = shufflevector <4 x i4> [[LOADVECN2]], <4 x i4> poison, <3 x i32> <i32 0, i32 1, i32 2> diff --git a/clang/test/CodeGenHLSL/builtins/WaveActiveMin.hlsl b/clang/test/CodeGenHLSL/builtins/WaveActiveMin.hlsl new file mode 100644 index 0000000..1194f84 --- /dev/null +++ b/clang/test/CodeGenHLSL/builtins/WaveActiveMin.hlsl @@ -0,0 +1,46 @@ +// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -triple \ +// RUN: dxil-pc-shadermodel6.3-compute %s -emit-llvm -disable-llvm-passes -o - | \ +// RUN: FileCheck %s --check-prefixes=CHECK,CHECK-DXIL +// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -triple \ +// RUN: spirv-pc-vulkan-compute %s -emit-llvm -disable-llvm-passes -o - | \ +// RUN: FileCheck %s --check-prefixes=CHECK,CHECK-SPIRV + +// Test basic lowering to runtime function call. 
+ +// CHECK-LABEL: test_int +int test_int(int expr) { + // CHECK-SPIRV: %[[RET:.*]] = call spir_func [[TY:.*]] @llvm.spv.wave.reduce.min.i32([[TY]] %[[#]]) + // CHECK-DXIL: %[[RET:.*]] = call [[TY:.*]] @llvm.dx.wave.reduce.min.i32([[TY]] %[[#]]) + // CHECK: ret [[TY]] %[[RET]] + return WaveActiveMin(expr); +} + +// CHECK-DXIL: declare [[TY]] @llvm.dx.wave.reduce.min.i32([[TY]]) #[[#attr:]] +// CHECK-SPIRV: declare [[TY]] @llvm.spv.wave.reduce.min.i32([[TY]]) #[[#attr:]] + +// CHECK-LABEL: test_uint64_t +uint64_t test_uint64_t(uint64_t expr) { + // CHECK-SPIRV: %[[RET:.*]] = call spir_func [[TY:.*]] @llvm.spv.wave.reduce.umin.i64([[TY]] %[[#]]) + // CHECK-DXIL: %[[RET:.*]] = call [[TY:.*]] @llvm.dx.wave.reduce.umin.i64([[TY]] %[[#]]) + // CHECK: ret [[TY]] %[[RET]] + return WaveActiveMin(expr); +} + +// CHECK-DXIL: declare [[TY]] @llvm.dx.wave.reduce.umin.i64([[TY]]) #[[#attr:]] +// CHECK-SPIRV: declare [[TY]] @llvm.spv.wave.reduce.umin.i64([[TY]]) #[[#attr:]] + +// Test basic lowering to runtime function call with a float vector value. + +// CHECK-LABEL: test_floatv4 +float4 test_floatv4(float4 expr) { + // CHECK-SPIRV: %[[RET1:.*]] = call reassoc nnan ninf nsz arcp afn spir_func [[TY1:.*]] @llvm.spv.wave.reduce.min.v4f32([[TY1]] %[[#]] + // CHECK-DXIL: %[[RET1:.*]] = call reassoc nnan ninf nsz arcp afn [[TY1:.*]] @llvm.dx.wave.reduce.min.v4f32([[TY1]] %[[#]]) + // CHECK: ret [[TY1]] %[[RET1]] + return WaveActiveMin(expr); +} + +// CHECK-DXIL: declare [[TY1]] @llvm.dx.wave.reduce.min.v4f32([[TY1]]) #[[#attr]] +// CHECK-SPIRV: declare [[TY1]] @llvm.spv.wave.reduce.min.v4f32([[TY1]]) #[[#attr]] + +// CHECK: attributes #[[#attr]] = {{{.*}} convergent {{.*}}} + diff --git a/clang/test/CodeGenOpenCL/preserve_vec3.cl b/clang/test/CodeGenOpenCL/preserve_vec3.cl index e76aa81..0017169 100644 --- a/clang/test/CodeGenOpenCL/preserve_vec3.cl +++ b/clang/test/CodeGenOpenCL/preserve_vec3.cl @@ -12,7 +12,7 @@ typedef float float4 __attribute__((ext_vector_type(4))); // CHECK-SAME: ptr addrspace(1) noundef readonly align 16 captures(none) [[A:%.*]], ptr addrspace(1) noundef writeonly align 16 captures(none) initializes((0, 16)) [[B:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] !kernel_arg_addr_space [[META7:![0-9]+]] !kernel_arg_access_qual [[META8:![0-9]+]] !kernel_arg_type [[META9:![0-9]+]] !kernel_arg_base_type [[META10:![0-9]+]] !kernel_arg_type_qual [[META11:![0-9]+]] { // CHECK-NEXT: [[ENTRY:.*:]] // CHECK-NEXT: [[TMP0:%.*]] = load <3 x float>, ptr addrspace(1) [[A]], align 16 -// CHECK-NEXT: [[EXTRACTVEC1_I:%.*]] = shufflevector <3 x float> [[TMP0]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison> +// CHECK-NEXT: [[EXTRACTVEC1_I:%.*]] = shufflevector <3 x float> [[TMP0]], <3 x float> <float undef, float poison, float poison>, <4 x i32> <i32 0, i32 1, i32 2, i32 3> // CHECK-NEXT: store <4 x float> [[EXTRACTVEC1_I]], ptr addrspace(1) [[B]], align 16, !tbaa [[CHAR_TBAA12:![0-9]+]] // CHECK-NEXT: ret void // @@ -24,7 +24,7 @@ void kernel foo(global float3 *a, global float3 *b) { // CHECK-SAME: ptr addrspace(1) noundef writeonly align 16 captures(none) initializes((0, 16)) [[A:%.*]], ptr addrspace(1) noundef readonly align 16 captures(none) [[B:%.*]]) local_unnamed_addr #[[ATTR0]] !kernel_arg_addr_space [[META7]] !kernel_arg_access_qual [[META8]] !kernel_arg_type [[META13:![0-9]+]] !kernel_arg_base_type [[META14:![0-9]+]] !kernel_arg_type_qual [[META11]] { // CHECK-NEXT: [[ENTRY:.*:]] // CHECK-NEXT: [[TMP0:%.*]] = load <3 x float>, ptr addrspace(1) [[B]], align 16, !tbaa [[CHAR_TBAA12]]
-// CHECK-NEXT: [[EXTRACTVEC_I:%.*]] = shufflevector <3 x float> [[TMP0]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison> +// CHECK-NEXT: [[EXTRACTVEC_I:%.*]] = shufflevector <3 x float> [[TMP0]], <3 x float> <float undef, float poison, float poison>, <4 x i32> <i32 0, i32 1, i32 2, i32 3> // CHECK-NEXT: store <4 x float> [[EXTRACTVEC_I]], ptr addrspace(1) [[A]], align 16, !tbaa [[CHAR_TBAA12]] // CHECK-NEXT: ret void // @@ -60,7 +60,7 @@ void kernel float3_to_double2(global float3 *a, global double2 *b) { // CHECK-SAME: ptr addrspace(1) noundef writeonly align 8 captures(none) initializes((0, 8)) [[A:%.*]], ptr addrspace(1) noundef readonly align 8 captures(none) [[B:%.*]]) local_unnamed_addr #[[ATTR0]] !kernel_arg_addr_space [[META7]] !kernel_arg_access_qual [[META8]] !kernel_arg_type [[META17:![0-9]+]] !kernel_arg_base_type [[META18:![0-9]+]] !kernel_arg_type_qual [[META11]] { // CHECK-NEXT: [[ENTRY:.*:]] // CHECK-NEXT: [[TMP0:%.*]] = load <3 x i16>, ptr addrspace(1) [[B]], align 8, !tbaa [[CHAR_TBAA12]] -// CHECK-NEXT: [[EXTRACTVEC_I:%.*]] = shufflevector <3 x i16> [[TMP0]], <3 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison> +// CHECK-NEXT: [[EXTRACTVEC_I:%.*]] = shufflevector <3 x i16> [[TMP0]], <3 x i16> <i16 undef, i16 poison, i16 poison>, <4 x i32> <i32 0, i32 1, i32 2, i32 3> // CHECK-NEXT: store <4 x i16> [[EXTRACTVEC_I]], ptr addrspace(1) [[A]], align 8, !tbaa [[CHAR_TBAA12]] // CHECK-NEXT: ret void // @@ -71,8 +71,8 @@ void kernel char8_to_short3(global short3 *a, global char8 *b) { // CHECK-LABEL: define dso_local spir_func void @from_char3( // CHECK-SAME: <3 x i8> noundef [[A:%.*]], ptr addrspace(1) noundef writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2:[0-9]+]] { // CHECK-NEXT: [[ENTRY:.*:]] -// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <3 x i8> [[A]], <3 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison> -// CHECK-NEXT: store <4 x i8> [[EXTRACTVEC]], ptr addrspace(1) [[OUT]], align 4, !tbaa [[INT_TBAA3:![0-9]+]] +// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i8> [[A]], <3 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison> +// CHECK-NEXT: store <4 x i8> [[TMP0]], ptr addrspace(1) [[OUT]], align 4, !tbaa [[INT_TBAA3:![0-9]+]] // CHECK-NEXT: ret void // void from_char3(char3 a, global int *out) { @@ -82,8 +82,8 @@ void from_char3(char3 a, global int *out) { // CHECK-LABEL: define dso_local spir_func void @from_short3( // CHECK-SAME: <3 x i16> noundef [[A:%.*]], ptr addrspace(1) noundef writeonly captures(none) initializes((0, 8)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2]] { // CHECK-NEXT: [[ENTRY:.*:]] -// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <3 x i16> [[A]], <3 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison> -// CHECK-NEXT: store <4 x i16> [[EXTRACTVEC]], ptr addrspace(1) [[OUT]], align 8, !tbaa [[LONG_TBAA19:![0-9]+]] +// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i16> [[A]], <3 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison> +// CHECK-NEXT: store <4 x i16> [[TMP0]], ptr addrspace(1) [[OUT]], align 8, !tbaa [[LONG_TBAA19:![0-9]+]] // CHECK-NEXT: ret void // void from_short3(short3 a, global long *out) { @@ -94,7 +94,8 @@ void from_short3(short3 a, global long *out) { // CHECK-SAME: i32 noundef [[A:%.*]], ptr addrspace(1) noundef writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2]] { // CHECK-NEXT: [[ENTRY:.*:]] // CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 [[A]] to <4 x i8> -// CHECK-NEXT: [[EXTRACTVEC:%.*]] = 
shufflevector <4 x i8> [[TMP0]], <4 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison> +// CHECK-NEXT: [[ASTYPE:%.*]] = shufflevector <4 x i8> [[TMP0]], <4 x i8> poison, <3 x i32> <i32 0, i32 1, i32 2> +// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <3 x i8> [[ASTYPE]], <3 x i8> <i8 undef, i8 poison, i8 poison>, <4 x i32> <i32 0, i32 1, i32 2, i32 3> // CHECK-NEXT: store <4 x i8> [[EXTRACTVEC]], ptr addrspace(1) [[OUT]], align 4, !tbaa [[CHAR_TBAA12]] // CHECK-NEXT: ret void // @@ -106,7 +107,8 @@ void scalar_to_char3(int a, global char3 *out) { // CHECK-SAME: i64 noundef [[A:%.*]], ptr addrspace(1) noundef writeonly captures(none) initializes((0, 8)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2]] { // CHECK-NEXT: [[ENTRY:.*:]] // CHECK-NEXT: [[TMP0:%.*]] = bitcast i64 [[A]] to <4 x i16> -// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <4 x i16> [[TMP0]], <4 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison> +// CHECK-NEXT: [[ASTYPE:%.*]] = shufflevector <4 x i16> [[TMP0]], <4 x i16> poison, <3 x i32> <i32 0, i32 1, i32 2> +// CHECK-NEXT: [[EXTRACTVEC:%.*]] = shufflevector <3 x i16> [[ASTYPE]], <3 x i16> <i16 undef, i16 poison, i16 poison>, <4 x i32> <i32 0, i32 1, i32 2, i32 3> // CHECK-NEXT: store <4 x i16> [[EXTRACTVEC]], ptr addrspace(1) [[OUT]], align 8, !tbaa [[CHAR_TBAA12]] // CHECK-NEXT: ret void // diff --git a/clang/include/clang/Driver/aarch64-mlr-for-calls-only.c b/clang/test/Driver/aarch64-mlr-for-calls-only.c index e71a4cd..e71a4cd 100644 --- a/clang/include/clang/Driver/aarch64-mlr-for-calls-only.c +++ b/clang/test/Driver/aarch64-mlr-for-calls-only.c diff --git a/clang/test/Driver/fuchsia.c b/clang/test/Driver/fuchsia.c index d0fec18..99e5018 100644 --- a/clang/test/Driver/fuchsia.c +++ b/clang/test/Driver/fuchsia.c @@ -130,6 +130,11 @@ // RUN: -resource-dir=%S/Inputs/resource_dir_with_per_target_subdir \ // RUN: -fuse-ld=ld \ // RUN: | FileCheck %s -check-prefix=CHECK-SAFESTACK +// RUN: %clang -### %s --target=x86_64-unknown-fuchsia -m32 \ +// RUN: -fsanitize=safe-stack 2>&1 \ +// RUN: -resource-dir=%S/Inputs/resource_dir_with_per_target_subdir \ +// RUN: -fuse-ld=ld \ +// RUN: | FileCheck %s -check-prefix=CHECK-SAFESTACK // CHECK-SAFESTACK: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" // CHECK-SAFESTACK: "-fsanitize=safe-stack" // CHECK-SAFESTACK-NOT: "[[RESOURCE_DIR]]{{/|\\\\}}lib{{/|\\\\}}x86_64-unknown-fuchsia{{/|\\\\}}libclang_rt.safestack.a" diff --git a/clang/test/Frontend/diag-wrap-colors.cpp b/clang/test/Frontend/diag-wrap-colors.cpp new file mode 100644 index 0000000..e3dccb1 --- /dev/null +++ b/clang/test/Frontend/diag-wrap-colors.cpp @@ -0,0 +1,6 @@ +// RUN: not %clang_cc1 %s -fmessage-length=50 -fcolor-diagnostics -fno-show-source-location -o - 2>&1 | FileCheck %s + +struct F { + float a : 10; +}; +// CHECK: bit-field 'a' has non-integral type 'float' diff --git a/clang/test/Preprocessor/bpf-predefined-macros.c b/clang/test/Preprocessor/bpf-predefined-macros.c index cd8a2ec..a9ae8c5 100644 --- a/clang/test/Preprocessor/bpf-predefined-macros.c +++ b/clang/test/Preprocessor/bpf-predefined-macros.c @@ -70,6 +70,9 @@ int u; #ifdef __BPF_FEATURE_LOAD_ACQ_STORE_REL int v; #endif +#ifdef __BPF_FEATURE_GOTOX +int w; +#endif // CHECK: int b; // CHECK: int c; @@ -110,6 +113,7 @@ int v; // CPU_V4: int u; // CPU_V4: int v; +// CPU_V4: int w; // CPU_GENERIC: int g; diff --git a/clang/test/SemaHLSL/BuiltIns/WaveActiveMin.hlsl b/clang/test/SemaHLSL/BuiltIns/WaveActiveMin.hlsl new file mode 100644 index 0000000..3b12faf --- /dev/null +++ 
b/clang/test/SemaHLSL/BuiltIns/WaveActiveMin.hlsl @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -emit-llvm-only -disable-llvm-passes -verify + +int test_too_few_arg() { + return __builtin_hlsl_wave_active_min(); + // expected-error@-1 {{too few arguments to function call, expected 1, have 0}} +} + +float2 test_too_many_arg(float2 p0) { + return __builtin_hlsl_wave_active_min(p0, p0); + // expected-error@-1 {{too many arguments to function call, expected 1, have 2}} +} + +bool test_expr_bool_type_check(bool p0) { + return __builtin_hlsl_wave_active_min(p0); + // expected-error@-1 {{invalid operand of type 'bool'}} +} + +bool2 test_expr_bool_vec_type_check(bool2 p0) { + return __builtin_hlsl_wave_active_min(p0); + // expected-error@-1 {{invalid operand of type 'bool2' (aka 'vector<bool, 2>')}} +} + +struct S { float f; }; + +S test_expr_struct_type_check(S p0) { + return __builtin_hlsl_wave_active_min(p0); + // expected-error@-1 {{invalid operand of type 'S' where a scalar or vector is required}} +} + diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp index ca99940..c046142 100644 --- a/clang/unittests/Format/TokenAnnotatorTest.cpp +++ b/clang/unittests/Format/TokenAnnotatorTest.cpp @@ -1119,6 +1119,11 @@ TEST_F(TokenAnnotatorTest, UnderstandsOverloadedOperators) { EXPECT_TOKEN(Tokens[8], tok::amp, TT_PointerOrReference); EXPECT_TOKEN(Tokens[12], tok::amp, TT_PointerOrReference); + Tokens = annotate("::foo::bar& ::foo::bar::operator=(::foo::bar& other);"); + ASSERT_EQ(Tokens.size(), 22u) << Tokens; + EXPECT_TOKEN(Tokens[6], tok::identifier, TT_FunctionDeclarationName); + EXPECT_TOKEN(Tokens[17], tok::amp, TT_PointerOrReference); + Tokens = annotate("SomeLoooooooooooooooooType::Awaitable\n" "SomeLoooooooooooooooooType::operator co_await();"); ASSERT_EQ(Tokens.size(), 11u) << Tokens; @@ -3484,6 +3489,10 @@ TEST_F(TokenAnnotatorTest, StartOfName) { ASSERT_EQ(Tokens.size(), 8u) << Tokens; EXPECT_TOKEN(Tokens[2], tok::identifier, TT_Unknown); // Not StartOfName + Tokens = annotate("int* ::foo::bar;"); + ASSERT_EQ(Tokens.size(), 8u) << Tokens; + EXPECT_TOKEN(Tokens[3], tok::identifier, TT_StartOfName); + auto Style = getLLVMStyle(); Style.StatementAttributeLikeMacros.push_back("emit"); Tokens = annotate("emit foo = 0;", Style); diff --git a/flang/include/flang/Optimizer/Builder/IntrinsicCall.h b/flang/include/flang/Optimizer/Builder/IntrinsicCall.h index c3cd119b..3407dd0 100644 --- a/flang/include/flang/Optimizer/Builder/IntrinsicCall.h +++ b/flang/include/flang/Optimizer/Builder/IntrinsicCall.h @@ -211,6 +211,8 @@ struct IntrinsicLibrary { mlir::Value genBarrierArrive(mlir::Type, llvm::ArrayRef<mlir::Value>); mlir::Value genBarrierArriveCnt(mlir::Type, llvm::ArrayRef<mlir::Value>); void genBarrierInit(llvm::ArrayRef<fir::ExtendedValue>); + mlir::Value genBarrierTryWait(mlir::Type, llvm::ArrayRef<mlir::Value>); + mlir::Value genBarrierTryWaitSleep(mlir::Type, llvm::ArrayRef<mlir::Value>); fir::ExtendedValue genBesselJn(mlir::Type, llvm::ArrayRef<fir::ExtendedValue>); fir::ExtendedValue genBesselYn(mlir::Type, @@ -459,7 +461,21 @@ struct IntrinsicLibrary { mlir::Value genTime(mlir::Type, llvm::ArrayRef<mlir::Value>); void genTMABulkCommitGroup(llvm::ArrayRef<fir::ExtendedValue>); void genTMABulkG2S(llvm::ArrayRef<fir::ExtendedValue>); + void genTMABulkLoadC4(llvm::ArrayRef<fir::ExtendedValue>); + void genTMABulkLoadC8(llvm::ArrayRef<fir::ExtendedValue>); + void 
genTMABulkLoadI4(llvm::ArrayRef<fir::ExtendedValue>); + void genTMABulkLoadI8(llvm::ArrayRef<fir::ExtendedValue>); + void genTMABulkLoadR2(llvm::ArrayRef<fir::ExtendedValue>); + void genTMABulkLoadR4(llvm::ArrayRef<fir::ExtendedValue>); + void genTMABulkLoadR8(llvm::ArrayRef<fir::ExtendedValue>); void genTMABulkS2G(llvm::ArrayRef<fir::ExtendedValue>); + void genTMABulkStoreI4(llvm::ArrayRef<fir::ExtendedValue>); + void genTMABulkStoreI8(llvm::ArrayRef<fir::ExtendedValue>); + void genTMABulkStoreR2(llvm::ArrayRef<fir::ExtendedValue>); + void genTMABulkStoreR4(llvm::ArrayRef<fir::ExtendedValue>); + void genTMABulkStoreR8(llvm::ArrayRef<fir::ExtendedValue>); + void genTMABulkStoreC4(llvm::ArrayRef<fir::ExtendedValue>); + void genTMABulkStoreC8(llvm::ArrayRef<fir::ExtendedValue>); void genTMABulkWaitGroup(llvm::ArrayRef<fir::ExtendedValue>); mlir::Value genTrailz(mlir::Type, llvm::ArrayRef<mlir::Value>); fir::ExtendedValue genTransfer(mlir::Type, diff --git a/flang/include/flang/Parser/dump-parse-tree.h b/flang/include/flang/Parser/dump-parse-tree.h index 553cbd5..bb97069 100644 --- a/flang/include/flang/Parser/dump-parse-tree.h +++ b/flang/include/flang/Parser/dump-parse-tree.h @@ -599,7 +599,7 @@ public: NODE(parser, OmpInitClause) NODE(OmpInitClause, Modifier) NODE(parser, OmpInitializerClause) - NODE(parser, OmpInitializerProc) + NODE(parser, OmpInitializerExpression) NODE(parser, OmpInReductionClause) NODE(OmpInReductionClause, Modifier) NODE(parser, OmpInteropPreference) @@ -677,6 +677,10 @@ public: NODE_ENUM(OmpSeverityClause, Severity) NODE(parser, OmpStepComplexModifier) NODE(parser, OmpStepSimpleModifier) + NODE(parser, OmpStylizedDeclaration) + NODE(parser, OmpStylizedExpression) + NODE(parser, OmpStylizedInstance) + NODE(OmpStylizedInstance, Instance) NODE(parser, OmpTaskDependenceType) NODE_ENUM(OmpTaskDependenceType, Value) NODE(parser, OmpTaskReductionClause) diff --git a/flang/include/flang/Parser/openmp-utils.h b/flang/include/flang/Parser/openmp-utils.h index f761332..49db091 100644 --- a/flang/include/flang/Parser/openmp-utils.h +++ b/flang/include/flang/Parser/openmp-utils.h @@ -25,6 +25,13 @@ namespace Fortran::parser::omp { +template <typename T> constexpr auto addr_if(std::optional<T> &x) { + return x ? &*x : nullptr; +} +template <typename T> constexpr auto addr_if(const std::optional<T> &x) { + return x ? 
&*x : nullptr; +} + namespace detail { using D = llvm::omp::Directive; @@ -133,9 +140,24 @@ template <typename T> OmpDirectiveName GetOmpDirectiveName(const T &x) { } const OmpObjectList *GetOmpObjectList(const OmpClause &clause); + +template <typename T> +const T *GetFirstArgument(const OmpDirectiveSpecification &spec) { + for (const OmpArgument &arg : spec.Arguments().v) { + if (auto *t{std::get_if<T>(&arg.u)}) { + return t; + } + } + return nullptr; +} + const BlockConstruct *GetFortranBlockConstruct( const ExecutionPartConstruct &epc); +const OmpCombinerExpression *GetCombinerExpr( + const OmpReductionSpecifier &rspec); +const OmpInitializerExpression *GetInitializerExpr(const OmpClause &init); + } // namespace Fortran::parser::omp #endif // FORTRAN_PARSER_OPENMP_UTILS_H diff --git a/flang/include/flang/Parser/parse-tree.h b/flang/include/flang/Parser/parse-tree.h index 2cf6fae..c3a8c2e 100644 --- a/flang/include/flang/Parser/parse-tree.h +++ b/flang/include/flang/Parser/parse-tree.h @@ -24,7 +24,9 @@ #include "provenance.h" #include "flang/Common/idioms.h" #include "flang/Common/indirection.h" +#include "flang/Common/reference.h" #include "flang/Support/Fortran.h" +#include "llvm/ADT/ArrayRef.h" #include "llvm/Frontend/OpenACC/ACC.h.inc" #include "llvm/Frontend/OpenMP/OMP.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" @@ -3510,6 +3512,8 @@ struct OmpDirectiveName { // type-name list item struct OmpTypeName { + CharBlock source; + mutable const semantics::DeclTypeSpec *declTypeSpec{nullptr}; UNION_CLASS_BOILERPLATE(OmpTypeName); std::variant<TypeSpec, DeclarationTypeSpec> u; }; @@ -3538,6 +3542,39 @@ struct OmpObjectList { WRAPPER_CLASS_BOILERPLATE(OmpObjectList, std::list<OmpObject>); }; +struct OmpStylizedDeclaration { + COPY_AND_ASSIGN_BOILERPLATE(OmpStylizedDeclaration); + // Since "Reference" isn't handled by parse-tree-visitor, add EmptyTrait, + // and visit the members by hand when needed. + using EmptyTrait = std::true_type; + common::Reference<const OmpTypeName> type; + EntityDecl var; +}; + +struct OmpStylizedInstance { + struct Instance { + UNION_CLASS_BOILERPLATE(Instance); + std::variant<AssignmentStmt, CallStmt, common::Indirection<Expr>> u; + }; + TUPLE_CLASS_BOILERPLATE(OmpStylizedInstance); + std::tuple<std::list<OmpStylizedDeclaration>, Instance> t; +}; + +class ParseState; + +// Ref: [5.2:76], [6.0:185] +// +struct OmpStylizedExpression { + CharBlock source; + // Pointer to a temporary copy of the ParseState that is used to create + // additional parse subtrees for the stylized expression. This is only + // used internally during parsing and conveys no information to the + // consumers of the AST. 
+ const ParseState *state{nullptr}; + WRAPPER_CLASS_BOILERPLATE( + OmpStylizedExpression, std::list<OmpStylizedInstance>); +}; + // Ref: [4.5:201-207], [5.0:293-299], [5.1:325-331], [5.2:124] // // reduction-identifier -> @@ -3555,9 +3592,22 @@ struct OmpReductionIdentifier { // combiner-expression -> // since 4.5 // assignment-statement | // function-reference -struct OmpCombinerExpression { - UNION_CLASS_BOILERPLATE(OmpCombinerExpression); - std::variant<AssignmentStmt, FunctionReference> u; +struct OmpCombinerExpression : public OmpStylizedExpression { + INHERITED_WRAPPER_CLASS_BOILERPLATE( + OmpCombinerExpression, OmpStylizedExpression); + static llvm::ArrayRef<CharBlock> Variables(); +}; + +// Ref: [4.5:222:7-8], [5.0:305:28-29], [5.1:337:20-21], [5.2:127:6-8], +// [6.0:242:3-5] +// +// initializer-expression -> // since 4.5 +// OMP_PRIV = expression | +// subroutine-name(argument-list) +struct OmpInitializerExpression : public OmpStylizedExpression { + INHERITED_WRAPPER_CLASS_BOILERPLATE( + OmpInitializerExpression, OmpStylizedExpression); + static llvm::ArrayRef<CharBlock> Variables(); }; inline namespace arguments { @@ -4558,16 +4608,9 @@ struct OmpInReductionClause { std::tuple<MODIFIERS(), OmpObjectList> t; }; -// declare-reduction -> DECLARE REDUCTION (reduction-identifier : type-list -// : combiner) [initializer-clause] -struct OmpInitializerProc { - TUPLE_CLASS_BOILERPLATE(OmpInitializerProc); - std::tuple<ProcedureDesignator, std::list<ActualArgSpec>> t; -}; // Initialization for declare reduction construct struct OmpInitializerClause { - UNION_CLASS_BOILERPLATE(OmpInitializerClause); - std::variant<OmpInitializerProc, AssignmentStmt> u; + WRAPPER_CLASS_BOILERPLATE(OmpInitializerClause, OmpInitializerExpression); }; // Ref: [4.5:199-201], [5.0:288-290], [5.1:321-322], [5.2:115-117] diff --git a/flang/include/flang/Semantics/symbol.h b/flang/include/flang/Semantics/symbol.h index 04a0639..cb27d544 100644 --- a/flang/include/flang/Semantics/symbol.h +++ b/flang/include/flang/Semantics/symbol.h @@ -830,6 +830,8 @@ public: OmpUseDevicePtr, OmpUseDeviceAddr, OmpIsDevicePtr, OmpHasDeviceAddr, // OpenMP data-copying attribute OmpCopyIn, OmpCopyPrivate, + // OpenMP special variables + OmpInVar, OmpOrigVar, OmpOutVar, OmpPrivVar, // OpenMP miscellaneous flags OmpCommonBlock, OmpReduction, OmpInReduction, OmpAligned, OmpNontemporal, OmpAllocate, OmpDeclarativeAllocateDirective, diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp index 39bac81..53fe9c0 100644 --- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp +++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp @@ -50,6 +50,7 @@ #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" #include "mlir/Dialect/Math/IR/Math.h" +#include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/Vector/IR/VectorOps.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" @@ -358,6 +359,14 @@ static constexpr IntrinsicHandler handlers[]{ &I::genBarrierInit, {{{"barrier", asAddr}, {"count", asValue}}}, /*isElemental=*/false}, + {"barrier_try_wait", + &I::genBarrierTryWait, + {{{"barrier", asAddr}, {"token", asValue}}}, + /*isElemental=*/false}, + {"barrier_try_wait_sleep", + &I::genBarrierTryWaitSleep, + {{{"barrier", asAddr}, {"token", asValue}, {"ns", asValue}}}, + /*isElemental=*/false}, {"bessel_jn", &I::genBesselJn, {{{"n1", asValue}, {"n2", asValue}, {"x", asValue}}}, @@ -1036,10 +1045,87 @@ static constexpr IntrinsicHandler handlers[]{ {"dst", 
asAddr}, {"nbytes", asValue}}}, /*isElemental=*/false}, + {"tma_bulk_ldc4", + &I::genTMABulkLoadC4, + {{{"barrier", asAddr}, + {"src", asAddr}, + {"dst", asAddr}, + {"nelems", asValue}}}, + /*isElemental=*/false}, + {"tma_bulk_ldc8", + &I::genTMABulkLoadC8, + {{{"barrier", asAddr}, + {"src", asAddr}, + {"dst", asAddr}, + {"nelems", asValue}}}, + /*isElemental=*/false}, + {"tma_bulk_ldi4", + &I::genTMABulkLoadI4, + {{{"barrier", asAddr}, + {"src", asAddr}, + {"dst", asAddr}, + {"nelems", asValue}}}, + /*isElemental=*/false}, + {"tma_bulk_ldi8", + &I::genTMABulkLoadI8, + {{{"barrier", asAddr}, + {"src", asAddr}, + {"dst", asAddr}, + {"nelems", asValue}}}, + /*isElemental=*/false}, + {"tma_bulk_ldr2", + &I::genTMABulkLoadR2, + {{{"barrier", asAddr}, + {"src", asAddr}, + {"dst", asAddr}, + {"nelems", asValue}}}, + /*isElemental=*/false}, + {"tma_bulk_ldr4", + &I::genTMABulkLoadR4, + {{{"barrier", asAddr}, + {"src", asAddr}, + {"dst", asAddr}, + {"nelems", asValue}}}, + /*isElemental=*/false}, + {"tma_bulk_ldr8", + &I::genTMABulkLoadR8, + {{{"barrier", asAddr}, + {"src", asAddr}, + {"dst", asAddr}, + {"nelems", asValue}}}, + /*isElemental=*/false}, {"tma_bulk_s2g", &I::genTMABulkS2G, {{{"src", asAddr}, {"dst", asAddr}, {"nbytes", asValue}}}, /*isElemental=*/false}, + {"tma_bulk_store_c4", + &I::genTMABulkStoreC4, + {{{"src", asAddr}, {"dst", asAddr}, {"count", asValue}}}, + /*isElemental=*/false}, + {"tma_bulk_store_c8", + &I::genTMABulkStoreC8, + {{{"src", asAddr}, {"dst", asAddr}, {"count", asValue}}}, + /*isElemental=*/false}, + {"tma_bulk_store_i4", + &I::genTMABulkStoreI4, + {{{"src", asAddr}, {"dst", asAddr}, {"count", asValue}}}, + /*isElemental=*/false}, + {"tma_bulk_store_i8", + &I::genTMABulkStoreI8, + {{{"src", asAddr}, {"dst", asAddr}, {"count", asValue}}}, + /*isElemental=*/false}, + {"tma_bulk_store_r2", + &I::genTMABulkStoreR2, + {{{"src", asAddr}, {"dst", asAddr}, {"count", asValue}}}, + /*isElemental=*/false}, + {"tma_bulk_store_r4", + &I::genTMABulkStoreR4, + {{{"src", asAddr}, {"dst", asAddr}, {"count", asValue}}}, + /*isElemental=*/false}, + {"tma_bulk_store_r8", + &I::genTMABulkStoreR8, + {{{"src", asAddr}, {"dst", asAddr}, {"count", asValue}}}, + /*isElemental=*/false}, {"tma_bulk_wait_group", &I::genTMABulkWaitGroup, {{}}, @@ -3282,6 +3368,57 @@ void IntrinsicLibrary::genBarrierInit(llvm::ArrayRef<fir::ExtendedValue> args) { mlir::NVVM::FenceProxyOp::create(builder, loc, kind, space); } +// BARRIER_TRY_WAIT (CUDA) +mlir::Value +IntrinsicLibrary::genBarrierTryWait(mlir::Type resultType, + llvm::ArrayRef<mlir::Value> args) { + assert(args.size() == 2); + mlir::Value res = fir::AllocaOp::create(builder, loc, resultType); + mlir::Value zero = builder.createIntegerConstant(loc, resultType, 0); + fir::StoreOp::create(builder, loc, zero, res); + mlir::Value ns = + builder.createIntegerConstant(loc, builder.getI32Type(), 1000000); + mlir::Value load = fir::LoadOp::create(builder, loc, res); + auto whileOp = mlir::scf::WhileOp::create( + builder, loc, mlir::TypeRange{resultType}, mlir::ValueRange{load}); + mlir::Block *beforeBlock = builder.createBlock(&whileOp.getBefore()); + mlir::Value beforeArg = beforeBlock->addArgument(resultType, loc); + builder.setInsertionPointToStart(beforeBlock); + mlir::Value condition = mlir::arith::CmpIOp::create( + builder, loc, mlir::arith::CmpIPredicate::ne, beforeArg, zero); + mlir::scf::ConditionOp::create(builder, loc, condition, beforeArg); + mlir::Block *afterBlock = builder.createBlock(&whileOp.getAfter()); + 
afterBlock->addArgument(resultType, loc); + builder.setInsertionPointToStart(afterBlock); + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(builder.getContext()); + auto barrier = builder.createConvert(loc, llvmPtrTy, args[0]); + mlir::Value ret = + mlir::NVVM::InlinePtxOp::create( + builder, loc, {resultType}, {barrier, args[1], ns}, {}, + ".reg .pred p; mbarrier.try_wait.shared.b64 p, [%1], %2, %3; " + "selp.b32 %0, 1, 0, p;", + {}) + .getResult(0); + mlir::scf::YieldOp::create(builder, loc, ret); + builder.setInsertionPointAfter(whileOp); + return whileOp.getResult(0); +} + +// BARRIER_TRY_WAIT_SLEEP (CUDA) +mlir::Value +IntrinsicLibrary::genBarrierTryWaitSleep(mlir::Type resultType, + llvm::ArrayRef<mlir::Value> args) { + assert(args.size() == 3); + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(builder.getContext()); + auto barrier = builder.createConvert(loc, llvmPtrTy, args[0]); + return mlir::NVVM::InlinePtxOp::create( + builder, loc, {resultType}, {barrier, args[1], args[2]}, {}, + ".reg .pred p; mbarrier.try_wait.shared.b64 p, [%1], %2, %3; " + "selp.b32 %0, 1, 0, p;", + {}) + .getResult(0); +} + // BESSEL_JN fir::ExtendedValue IntrinsicLibrary::genBesselJn(mlir::Type resultType, @@ -9218,6 +9355,93 @@ void IntrinsicLibrary::genTMABulkG2S(llvm::ArrayRef<fir::ExtendedValue> args) { builder, loc, dst, src, barrier, fir::getBase(args[3]), {}, {}); } +static void genTMABulkLoad(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value barrier, mlir::Value src, + mlir::Value dst, mlir::Value nelem, + mlir::Value eleSize) { + mlir::Value size = mlir::arith::MulIOp::create(builder, loc, nelem, eleSize); + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(builder.getContext()); + barrier = builder.createConvert(loc, llvmPtrTy, barrier); + mlir::NVVM::InlinePtxOp::create( + builder, loc, mlir::TypeRange{}, {dst, src, size, barrier}, {}, + "cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%0], " + "[%1], %2, [%3];", + {}); + mlir::NVVM::InlinePtxOp::create( + builder, loc, mlir::TypeRange{}, {barrier, size}, {}, + "mbarrier.expect_tx.relaxed.cta.shared::cta.b64 [%0], %1;", {}); +} + +// TMA_BULK_LOADC4 +void IntrinsicLibrary::genTMABulkLoadC4( + llvm::ArrayRef<fir::ExtendedValue> args) { + assert(args.size() == 4); + mlir::Value eleSize = + builder.createIntegerConstant(loc, builder.getI32Type(), 8); + genTMABulkLoad(builder, loc, fir::getBase(args[0]), fir::getBase(args[1]), + fir::getBase(args[2]), fir::getBase(args[3]), eleSize); +} + +// TMA_BULK_LOADC8 +void IntrinsicLibrary::genTMABulkLoadC8( + llvm::ArrayRef<fir::ExtendedValue> args) { + assert(args.size() == 4); + mlir::Value eleSize = + builder.createIntegerConstant(loc, builder.getI32Type(), 16); + genTMABulkLoad(builder, loc, fir::getBase(args[0]), fir::getBase(args[1]), + fir::getBase(args[2]), fir::getBase(args[3]), eleSize); +} + +// TMA_BULK_LOADI4 +void IntrinsicLibrary::genTMABulkLoadI4( + llvm::ArrayRef<fir::ExtendedValue> args) { + assert(args.size() == 4); + mlir::Value eleSize = + builder.createIntegerConstant(loc, builder.getI32Type(), 4); + genTMABulkLoad(builder, loc, fir::getBase(args[0]), fir::getBase(args[1]), + fir::getBase(args[2]), fir::getBase(args[3]), eleSize); +} + +// TMA_BULK_LOADI8 +void IntrinsicLibrary::genTMABulkLoadI8( + llvm::ArrayRef<fir::ExtendedValue> args) { + assert(args.size() == 4); + mlir::Value eleSize = + builder.createIntegerConstant(loc, builder.getI32Type(), 8); + genTMABulkLoad(builder, loc, fir::getBase(args[0]), fir::getBase(args[1]), + 
fir::getBase(args[2]), fir::getBase(args[3]), eleSize); +} + +// TMA_BULK_LOADR2 +void IntrinsicLibrary::genTMABulkLoadR2( + llvm::ArrayRef<fir::ExtendedValue> args) { + assert(args.size() == 4); + mlir::Value eleSize = + builder.createIntegerConstant(loc, builder.getI32Type(), 2); + genTMABulkLoad(builder, loc, fir::getBase(args[0]), fir::getBase(args[1]), + fir::getBase(args[2]), fir::getBase(args[3]), eleSize); +} + +// TMA_BULK_LOADR4 +void IntrinsicLibrary::genTMABulkLoadR4( + llvm::ArrayRef<fir::ExtendedValue> args) { + assert(args.size() == 4); + mlir::Value eleSize = + builder.createIntegerConstant(loc, builder.getI32Type(), 4); + genTMABulkLoad(builder, loc, fir::getBase(args[0]), fir::getBase(args[1]), + fir::getBase(args[2]), fir::getBase(args[3]), eleSize); +} + +// TMA_BULK_LOADR8 +void IntrinsicLibrary::genTMABulkLoadR8( + llvm::ArrayRef<fir::ExtendedValue> args) { + assert(args.size() == 4); + mlir::Value eleSize = + builder.createIntegerConstant(loc, builder.getI32Type(), 8); + genTMABulkLoad(builder, loc, fir::getBase(args[0]), fir::getBase(args[1]), + fir::getBase(args[2]), fir::getBase(args[3]), eleSize); +} + // TMA_BULK_S2G (CUDA) void IntrinsicLibrary::genTMABulkS2G(llvm::ArrayRef<fir::ExtendedValue> args) { assert(args.size() == 3); @@ -9227,6 +9451,97 @@ void IntrinsicLibrary::genTMABulkS2G(llvm::ArrayRef<fir::ExtendedValue> args) { mlir::NVVM::NVVMMemorySpace::Global); mlir::NVVM::CpAsyncBulkSharedCTAToGlobalOp::create( builder, loc, dst, src, fir::getBase(args[2]), {}, {}); + + mlir::NVVM::InlinePtxOp::create(builder, loc, mlir::TypeRange{}, {}, {}, + "cp.async.bulk.commit_group", {}); + mlir::NVVM::CpAsyncBulkWaitGroupOp::create(builder, loc, + builder.getI32IntegerAttr(0), {}); +} + +static void genTMABulkStore(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value src, mlir::Value dst, mlir::Value count, + mlir::Value eleSize) { + mlir::Value size = mlir::arith::MulIOp::create(builder, loc, eleSize, count); + src = convertPtrToNVVMSpace(builder, loc, src, + mlir::NVVM::NVVMMemorySpace::Shared); + dst = convertPtrToNVVMSpace(builder, loc, dst, + mlir::NVVM::NVVMMemorySpace::Global); + mlir::NVVM::CpAsyncBulkSharedCTAToGlobalOp::create(builder, loc, dst, src, + size, {}, {}); + mlir::NVVM::InlinePtxOp::create(builder, loc, mlir::TypeRange{}, {}, {}, + "cp.async.bulk.commit_group", {}); + mlir::NVVM::CpAsyncBulkWaitGroupOp::create(builder, loc, + builder.getI32IntegerAttr(0), {}); +} + +// TMA_BULK_STORE_C4 (CUDA) +void IntrinsicLibrary::genTMABulkStoreC4( + llvm::ArrayRef<fir::ExtendedValue> args) { + assert(args.size() == 3); + mlir::Value eleSize = + builder.createIntegerConstant(loc, builder.getI32Type(), 8); + genTMABulkStore(builder, loc, fir::getBase(args[0]), fir::getBase(args[1]), + fir::getBase(args[2]), eleSize); +} + +// TMA_BULK_STORE_C8 (CUDA) +void IntrinsicLibrary::genTMABulkStoreC8( + llvm::ArrayRef<fir::ExtendedValue> args) { + assert(args.size() == 3); + mlir::Value eleSize = + builder.createIntegerConstant(loc, builder.getI32Type(), 16); + genTMABulkStore(builder, loc, fir::getBase(args[0]), fir::getBase(args[1]), + fir::getBase(args[2]), eleSize); +} + +// TMA_BULK_STORE_I4 (CUDA) +void IntrinsicLibrary::genTMABulkStoreI4( + llvm::ArrayRef<fir::ExtendedValue> args) { + assert(args.size() == 3); + mlir::Value eleSize = + builder.createIntegerConstant(loc, builder.getI32Type(), 4); + genTMABulkStore(builder, loc, fir::getBase(args[0]), fir::getBase(args[1]), + fir::getBase(args[2]), eleSize); +} + +// TMA_BULK_STORE_I8 (CUDA) +void 
IntrinsicLibrary::genTMABulkStoreI8( + llvm::ArrayRef<fir::ExtendedValue> args) { + assert(args.size() == 3); + mlir::Value eleSize = + builder.createIntegerConstant(loc, builder.getI32Type(), 8); + genTMABulkStore(builder, loc, fir::getBase(args[0]), fir::getBase(args[1]), + fir::getBase(args[2]), eleSize); +} + +// TMA_BULK_STORE_R2 (CUDA) +void IntrinsicLibrary::genTMABulkStoreR2( + llvm::ArrayRef<fir::ExtendedValue> args) { + assert(args.size() == 3); + mlir::Value eleSize = + builder.createIntegerConstant(loc, builder.getI32Type(), 2); + genTMABulkStore(builder, loc, fir::getBase(args[0]), fir::getBase(args[1]), + fir::getBase(args[2]), eleSize); +} + +// TMA_BULK_STORE_R4 (CUDA) +void IntrinsicLibrary::genTMABulkStoreR4( + llvm::ArrayRef<fir::ExtendedValue> args) { + assert(args.size() == 3); + mlir::Value eleSize = + builder.createIntegerConstant(loc, builder.getI32Type(), 4); + genTMABulkStore(builder, loc, fir::getBase(args[0]), fir::getBase(args[1]), + fir::getBase(args[2]), eleSize); +} + +// TMA_BULK_STORE_R8 (CUDA) +void IntrinsicLibrary::genTMABulkStoreR8( + llvm::ArrayRef<fir::ExtendedValue> args) { + assert(args.size() == 3); + mlir::Value eleSize = + builder.createIntegerConstant(loc, builder.getI32Type(), 8); + genTMABulkStore(builder, loc, fir::getBase(args[0]), fir::getBase(args[1]), + fir::getBase(args[2]), eleSize); } // TMA_BULK_WAIT_GROUP (CUDA) diff --git a/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp b/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp index 0776346..8ca2869 100644 --- a/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp +++ b/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp @@ -143,7 +143,8 @@ public: llvm::SmallVector<mlir::Type> operandsTypes; for (auto arg : gpuLaunchFunc.getKernelOperands()) operandsTypes.push_back(arg.getType()); - auto fctTy = mlir::FunctionType::get(&context, operandsTypes, {}); + auto fctTy = mlir::FunctionType::get(&context, operandsTypes, + gpuLaunchFunc.getResultTypes()); if (!hasPortableSignature(fctTy, op)) convertCallOp(gpuLaunchFunc, fctTy); } else if (auto addr = mlir::dyn_cast<fir::AddrOfOp>(op)) { @@ -520,10 +521,14 @@ public: llvm::SmallVector<mlir::Value, 1> newCallResults; // TODO propagate/update call argument and result attributes. if constexpr (std::is_same_v<std::decay_t<A>, mlir::gpu::LaunchFuncOp>) { + mlir::Value asyncToken = callOp.getAsyncToken(); auto newCall = A::create(*rewriter, loc, callOp.getKernel(), callOp.getGridSizeOperandValues(), callOp.getBlockSizeOperandValues(), - callOp.getDynamicSharedMemorySize(), newOpers); + callOp.getDynamicSharedMemorySize(), newOpers, + asyncToken ? asyncToken.getType() : nullptr, + callOp.getAsyncDependencies(), + /*clusterSize=*/std::nullopt); if (callOp.getClusterSizeX()) newCall.getClusterSizeXMutable().assign(callOp.getClusterSizeX()); if (callOp.getClusterSizeY()) diff --git a/flang/lib/Parser/openmp-parsers.cpp b/flang/lib/Parser/openmp-parsers.cpp index d1e081c..4159d2e 100644 --- a/flang/lib/Parser/openmp-parsers.cpp +++ b/flang/lib/Parser/openmp-parsers.cpp @@ -275,6 +275,13 @@ struct SpecificModifierParser { // --- Iterator helpers ----------------------------------------------- +static EntityDecl MakeEntityDecl(ObjectName &&name) { + return EntityDecl( + /*ObjectName=*/std::move(name), std::optional<ArraySpec>{}, + std::optional<CoarraySpec>{}, std::optional<CharLength>{}, + std::optional<Initialization>{}); +} + // [5.0:47:17-18] In an iterator-specifier, if the iterator-type is not // specified then the type of that iterator is default integer. 
// [5.0:49:14] The iterator-type must be an integer type. @@ -282,11 +289,7 @@ static std::list<EntityDecl> makeEntityList(std::list<ObjectName> &&names) { std::list<EntityDecl> entities; for (auto iter = names.begin(), end = names.end(); iter != end; ++iter) { - EntityDecl entityDecl( - /*ObjectName=*/std::move(*iter), std::optional<ArraySpec>{}, - std::optional<CoarraySpec>{}, std::optional<CharLength>{}, - std::optional<Initialization>{}); - entities.push_back(std::move(entityDecl)); + entities.push_back(MakeEntityDecl(std::move(*iter))); } return entities; } @@ -306,6 +309,217 @@ static TypeDeclarationStmt makeIterSpecDecl(std::list<ObjectName> &&names) { makeEntityList(std::move(names))); } +// --- Stylized expression handling ----------------------------------- + +// OpenMP has a concept of an "OpenMP stylized expression". Syntactically +// it looks like a typical Fortran expression (or statement), except: +// - the only variables allowed in it are OpenMP special variables; the +// exact set of these variables depends on the specific case of the +// stylized expression +// - the special OpenMP variables present may assume one or more types, +// and the expression should be semantically valid for each type. +// +// The stylized expression can be thought of as a template, which will be +// instantiated for each type provided somewhere in the context in which +// the stylized expression appears. +// +// AST nodes: +// - OmpStylizedExpression: contains the source string for the expression, +// plus the list of instances (OmpStylizedInstance). +// - OmpStylizedInstance: corresponds to the instantiation of the stylized +// expression for a specific type. The way that the type is specified is +// by creating declarations (OmpStylizedDeclaration) for the special +// variables. Together with the AST tree corresponding to the stylized +// expression, the instantiation has enough information for semantic +// analysis. Each instance has its own scope, and the special variables +// have their own Symbols (local to the scope). +// - OmpStylizedDeclaration: encapsulates the information that the visitors +// in resolve-names can use to "emulate" a declaration for a special +// variable and allow name resolution in the instantiation AST to work. +// +// Implementation specifics: +// The semantic analysis stores "evaluate::Expr" in each AST node rooted +// in parser::Expr (in the typedExpr member). The evaluate::Expr is specific +// to a given type, and so to allow different types for a given expression, +// for each type a separate copy of the parser::Expr subtree is created. +// Normally, AST nodes are non-copyable (copy-ctor is deleted), so to create +// several copies of a subtree, the same source string is parsed several +// times. The ParseState member in OmpStylizedExpression is the parser state +// immediately before the stylized expression. +// +// Initially, when OmpStylizedExpression is first created, the expression is +// parsed as if it were actual code, but this parsing is only done to +// establish where the stylized expression ends (in the source). The source +// and the initial parser state are stored in the object, and the instance +// list is empty. +// Once the parsing of the containing OmpDirectiveSpecification completes, +// a post-processing "parser" (OmpStylizedInstanceCreator) executes. This +// post-processor examines the directive specification to see if it expects +// any stylized expressions to be contained in it, and then instantiates +// them for each such directive.
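+// +// For example (an illustrative sketch, not part of this patch), given +// !$omp declare reduction(+ : integer, real : omp_out = omp_out + omp_in) +// the combiner "omp_out = omp_out + omp_in" is a stylized expression whose +// special variables are omp_in and omp_out. It is instantiated twice: once +// as if "integer :: omp_in, omp_out" were in effect, and once as if +// "real :: omp_in, omp_out" were, each instance in its own scope. An +// INITIALIZER clause is handled analogously with the special variables +// omp_priv and omp_orig.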
+ +template <typename A> struct NeverParser { + using resultType = A; + std::optional<resultType> Parse(ParseState &state) const { + // Always fail, but without any messages. + return std::nullopt; + } +}; + +template <typename A> constexpr auto never() { return NeverParser<A>{}; } + +// Parser for optional<T> which always succeeds and returns std::nullopt. +// It's only needed to produce "std::optional<CallStmt::Chevrons>" in +// CallStmt. +template <typename A, typename B = void> struct NullParser; +template <typename B> struct NullParser<std::optional<B>> { + using resultType = std::optional<B>; + std::optional<resultType> Parse(ParseState &) const { + return resultType{std::nullopt}; + } +}; + +template <typename A> constexpr auto null() { return NullParser<A>{}; } + +// OmpStylizedDeclaration and OmpStylizedInstance are helper classes, and +// don't correspond to anything in the source. Their parsers should still +// exist, but they should never be executed. +TYPE_PARSER(construct<OmpStylizedDeclaration>(never<OmpStylizedDeclaration>())) +TYPE_PARSER(construct<OmpStylizedInstance>(never<OmpStylizedInstance>())) + +TYPE_PARSER( // + construct<OmpStylizedInstance::Instance>(Parser<AssignmentStmt>{}) || + construct<OmpStylizedInstance::Instance>( + sourced(construct<CallStmt>(Parser<ProcedureDesignator>{}, + null<std::optional<CallStmt::Chevrons>>(), + parenthesized(optionalList(actualArgSpec))))) || + construct<OmpStylizedInstance::Instance>(indirect(expr))) + +struct OmpStylizedExpressionParser { + using resultType = OmpStylizedExpression; + + std::optional<resultType> Parse(ParseState &state) const { + auto *saved{new ParseState(state)}; + auto getSource{verbatim(Parser<OmpStylizedInstance::Instance>{} >> ok)}; + if (auto &&ok{getSource.Parse(state)}) { + OmpStylizedExpression result{std::list<OmpStylizedInstance>{}}; + result.source = ok->source; + result.state = saved; + // result.v remains empty + return std::move(result); + } + delete saved; + return std::nullopt; + } +}; + +static void Instantiate(OmpStylizedExpression &ose, + llvm::ArrayRef<const OmpTypeName *> types, llvm::ArrayRef<CharBlock> vars) { + // 1. For each var in the vars list, declare it with the corresponding + // type from types. + // 2. Run the parser to get the AST for the stylized expression. + // 3. Create OmpStylizedInstance and append it to the list in ose. + assert(types.size() == vars.size() && "List size mismatch"); + // A ParseState object is irreversibly modified during parsing (in + // particular, it cannot be rewound to an earlier position in the source). + // Because of that we need to create a local copy for each instantiation. + // If rewinding was possible, we could just use the current one, and we + // wouldn't need to save it in the AST node. + ParseState state{DEREF(ose.state)}; + + std::list<OmpStylizedDeclaration> decls; + for (auto [type, var] : llvm::zip_equal(types, vars)) { + decls.emplace_back(OmpStylizedDeclaration{ + common::Reference(*type), MakeEntityDecl(Name{var})}); + } + + if (auto &&instance{Parser<OmpStylizedInstance::Instance>{}.Parse(state)}) { + ose.v.emplace_back( + OmpStylizedInstance{std::move(decls), std::move(*instance)}); + } +} + +static void InstantiateForTypes(OmpStylizedExpression &ose, + const OmpTypeNameList &typeNames, llvm::ArrayRef<CharBlock> vars) { + // For each type in the type list, declare all variables in vars with + // that type, and complete the instantiation.
+ for (const OmpTypeName &t : typeNames.v) { + std::vector<const OmpTypeName *> types(vars.size(), &t); + Instantiate(ose, types, vars); + } +} + +static void InstantiateDeclareReduction(OmpDirectiveSpecification &spec) { + // There can be arguments/clauses that don't make sense; that analysis + // is left until semantic checks. Tolerate any unexpected stuff. + auto *rspec{GetFirstArgument<OmpReductionSpecifier>(spec)}; + if (!rspec) { + return; + } + + const OmpTypeNameList *typeNames{nullptr}; + + if (auto *cexpr{ + const_cast<OmpCombinerExpression *>(GetCombinerExpr(*rspec))}) { + typeNames = &std::get<OmpTypeNameList>(rspec->t); + + InstantiateForTypes(*cexpr, *typeNames, OmpCombinerExpression::Variables()); + delete cexpr->state; + cexpr->state = nullptr; + } else { + // If there are no types, there is nothing else to do. + return; + } + + for (const OmpClause &clause : spec.Clauses().v) { + llvm::omp::Clause id{clause.Id()}; + if (id == llvm::omp::Clause::OMPC_initializer) { + if (auto *iexpr{const_cast<OmpInitializerExpression *>( + GetInitializerExpr(clause))}) { + InstantiateForTypes( + *iexpr, *typeNames, OmpInitializerExpression::Variables()); + delete iexpr->state; + iexpr->state = nullptr; + } + } + } +} + +static void InstantiateStylizedDirective(OmpDirectiveSpecification &spec) { + const OmpDirectiveName &dirName{spec.DirName()}; + if (dirName.v == llvm::omp::Directive::OMPD_declare_reduction) { + InstantiateDeclareReduction(spec); + } +} + +template <typename P, + typename = std::enable_if_t< + std::is_same_v<typename P::resultType, OmpDirectiveSpecification>>> +struct OmpStylizedInstanceCreator { + using resultType = OmpDirectiveSpecification; + constexpr OmpStylizedInstanceCreator(P p) : parser_(p) {} + + std::optional<resultType> Parse(ParseState &state) const { + if (auto &&spec{parser_.Parse(state)}) { + InstantiateStylizedDirective(*spec); + return std::move(spec); + } + return std::nullopt; + } + +private: + const P parser_; +}; + +template <typename P> +OmpStylizedInstanceCreator(P) -> OmpStylizedInstanceCreator<P>; + +// --- Parsers for types ---------------------------------------------- + +TYPE_PARSER( // + sourced(construct<OmpTypeName>(Parser<DeclarationTypeSpec>{})) || + sourced(construct<OmpTypeName>(Parser<TypeSpec>{}))) + // --- Parsers for arguments ------------------------------------------ // At the moment these are only directive arguments. 
This is needed for @@ -366,10 +580,6 @@ struct OmpArgumentListParser { } }; -TYPE_PARSER( // - construct<OmpTypeName>(Parser<DeclarationTypeSpec>{}) || - construct<OmpTypeName>(Parser<TypeSpec>{})) - // 2.15.3.6 REDUCTION (reduction-identifier: variable-name-list) TYPE_PARSER(construct<OmpReductionIdentifier>(Parser<DefinedOperator>{}) || construct<OmpReductionIdentifier>(Parser<ProcedureDesignator>{})) @@ -1065,7 +1275,8 @@ TYPE_PARSER(construct<OmpOtherwiseClause>( TYPE_PARSER(construct<OmpWhenClause>( maybe(nonemptyList(Parser<OmpWhenClause::Modifier>{}) / ":"), - maybe(indirect(Parser<OmpDirectiveSpecification>{})))) + maybe(indirect( + OmpStylizedInstanceCreator(Parser<OmpDirectiveSpecification>{}))))) // OMP 5.2 12.6.1 grainsize([ prescriptiveness :] scalar-integer-expression) TYPE_PARSER(construct<OmpGrainsizeClause>( @@ -1777,12 +1988,7 @@ TYPE_PARSER( Parser<OpenMPInteropConstruct>{})) / endOfLine) -TYPE_PARSER(construct<OmpInitializerProc>(Parser<ProcedureDesignator>{}, - parenthesized(many(maybe(","_tok) >> Parser<ActualArgSpec>{})))) - -TYPE_PARSER(construct<OmpInitializerClause>( - construct<OmpInitializerClause>(assignmentStmt) || - construct<OmpInitializerClause>(Parser<OmpInitializerProc>{}))) +TYPE_PARSER(construct<OmpInitializerClause>(Parser<OmpInitializerExpression>{})) // OpenMP 5.2: 7.5.4 Declare Variant directive TYPE_PARSER(sourced(construct<OmpDeclareVariantDirective>( @@ -1794,7 +2000,7 @@ TYPE_PARSER(sourced(construct<OmpDeclareVariantDirective>( TYPE_PARSER(sourced(construct<OpenMPDeclareReductionConstruct>( predicated(Parser<OmpDirectiveName>{}, IsDirective(llvm::omp::Directive::OMPD_declare_reduction)) >= - Parser<OmpDirectiveSpecification>{}))) + OmpStylizedInstanceCreator(Parser<OmpDirectiveSpecification>{})))) // 2.10.6 Declare Target Construct TYPE_PARSER(sourced(construct<OpenMPDeclareTargetConstruct>( @@ -1832,8 +2038,8 @@ TYPE_PARSER(sourced(construct<OpenMPDeclareMapperConstruct>( IsDirective(llvm::omp::Directive::OMPD_declare_mapper)) >= Parser<OmpDirectiveSpecification>{}))) -TYPE_PARSER(construct<OmpCombinerExpression>(Parser<AssignmentStmt>{}) || - construct<OmpCombinerExpression>(Parser<FunctionReference>{})) +TYPE_PARSER(construct<OmpCombinerExpression>(OmpStylizedExpressionParser{})) +TYPE_PARSER(construct<OmpInitializerExpression>(OmpStylizedExpressionParser{})) TYPE_PARSER(sourced(construct<OpenMPCriticalConstruct>( OmpBlockConstructParser{llvm::omp::Directive::OMPD_critical}))) diff --git a/flang/lib/Parser/openmp-utils.cpp b/flang/lib/Parser/openmp-utils.cpp index 937a17f..95ad3f6 100644 --- a/flang/lib/Parser/openmp-utils.cpp +++ b/flang/lib/Parser/openmp-utils.cpp @@ -74,4 +74,16 @@ const BlockConstruct *GetFortranBlockConstruct( return nullptr; } +const OmpCombinerExpression *GetCombinerExpr( + const OmpReductionSpecifier &rspec) { + return addr_if(std::get<std::optional<OmpCombinerExpression>>(rspec.t)); +} + +const OmpInitializerExpression *GetInitializerExpr(const OmpClause &init) { + if (auto *wrapped{std::get_if<OmpClause::Initializer>(&init.u)}) { + return &wrapped->v.v; + } + return nullptr; +} + } // namespace Fortran::parser::omp diff --git a/flang/lib/Parser/parse-tree.cpp b/flang/lib/Parser/parse-tree.cpp index 8cbaa39..ad0016e 100644 --- a/flang/lib/Parser/parse-tree.cpp +++ b/flang/lib/Parser/parse-tree.cpp @@ -11,6 +11,7 @@ #include "flang/Common/indirection.h" #include "flang/Parser/tools.h" #include "flang/Parser/user-state.h" +#include "llvm/ADT/ArrayRef.h" #include "llvm/Frontend/OpenMP/OMP.h" #include 
"llvm/Support/raw_ostream.h" #include <algorithm> @@ -430,4 +431,30 @@ const OmpClauseList &OmpDirectiveSpecification::Clauses() const { } return empty; } + +static bool InitCharBlocksFromStrings(llvm::MutableArrayRef<CharBlock> blocks, + llvm::ArrayRef<std::string> strings) { + for (auto [i, n] : llvm::enumerate(strings)) { + blocks[i] = CharBlock(n); + } + return true; +} + +// The names should have static storage duration. Keep these names +// in a sigle place. +llvm::ArrayRef<CharBlock> OmpCombinerExpression::Variables() { + static std::string names[]{"omp_in", "omp_out"}; + static CharBlock vars[std::size(names)]; + + [[maybe_unused]] static bool init = InitCharBlocksFromStrings(vars, names); + return vars; +} + +llvm::ArrayRef<CharBlock> OmpInitializerExpression::Variables() { + static std::string names[]{"omp_orig", "omp_priv"}; + static CharBlock vars[std::size(names)]; + + [[maybe_unused]] static bool init = InitCharBlocksFromStrings(vars, names); + return vars; +} } // namespace Fortran::parser diff --git a/flang/lib/Parser/unparse.cpp b/flang/lib/Parser/unparse.cpp index 20a8d2a..9b38cfc 100644 --- a/flang/lib/Parser/unparse.cpp +++ b/flang/lib/Parser/unparse.cpp @@ -2095,15 +2095,13 @@ public: // OpenMP Clauses & Directives void Unparse(const OmpArgumentList &x) { Walk(x.v, ", "); } + void Unparse(const OmpTypeNameList &x) { Walk(x.v, ", "); } void Unparse(const OmpBaseVariantNames &x) { Walk(std::get<0>(x.t)); // OmpObject Put(":"); Walk(std::get<1>(x.t)); // OmpObject } - void Unparse(const OmpTypeNameList &x) { // - Walk(x.v, ","); - } void Unparse(const OmpMapperSpecifier &x) { const auto &mapperName{std::get<std::string>(x.t)}; if (mapperName.find(llvm::omp::OmpDefaultMapperName) == std::string::npos) { @@ -2202,6 +2200,15 @@ public: unsigned ompVersion{langOpts_.OpenMPVersion}; Word(llvm::omp::getOpenMPDirectiveName(x.v, ompVersion)); } + void Unparse(const OmpStylizedDeclaration &x) { + // empty + } + void Unparse(const OmpStylizedExpression &x) { // + Put(x.source.ToString()); + } + void Unparse(const OmpStylizedInstance &x) { + // empty + } void Unparse(const OmpIteratorSpecifier &x) { Walk(std::get<TypeDeclarationStmt>(x.t)); Put(" = "); @@ -2511,29 +2518,11 @@ public: void Unparse(const OpenMPCriticalConstruct &x) { Unparse(static_cast<const OmpBlockConstruct &>(x)); } - void Unparse(const OmpInitializerProc &x) { - Walk(std::get<ProcedureDesignator>(x.t)); - Put("("); - Walk(std::get<std::list<ActualArgSpec>>(x.t)); - Put(")"); - } - void Unparse(const OmpInitializerClause &x) { - // Don't let the visitor go to the normal AssignmentStmt Unparse function, - // it adds an extra newline that we don't want. - if (const auto *assignment{std::get_if<AssignmentStmt>(&x.u)}) { - Walk(assignment->t, " = "); - } else { - Walk(x.u); - } + void Unparse(const OmpInitializerExpression &x) { + Unparse(static_cast<const OmpStylizedExpression &>(x)); } void Unparse(const OmpCombinerExpression &x) { - // Don't let the visitor go to the normal AssignmentStmt Unparse function, - // it adds an extra newline that we don't want. 
- if (const auto *assignment{std::get_if<AssignmentStmt>(&x.u)}) { - Walk(assignment->t, " = "); - } else { - Walk(x.u); - } + Unparse(static_cast<const OmpStylizedExpression &>(x)); } void Unparse(const OpenMPDeclareReductionConstruct &x) { BeginOpenMP(); diff --git a/flang/lib/Semantics/resolve-directives.cpp b/flang/lib/Semantics/resolve-directives.cpp index 196755e..628068f 100644 --- a/flang/lib/Semantics/resolve-directives.cpp +++ b/flang/lib/Semantics/resolve-directives.cpp @@ -26,6 +26,8 @@ #include "flang/Semantics/symbol.h" #include "flang/Semantics/tools.h" #include "flang/Support/Flags.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringRef.h" #include "llvm/Frontend/OpenMP/OMP.h.inc" #include "llvm/Support/Debug.h" #include <list> @@ -453,6 +455,21 @@ public: return true; } + bool Pre(const parser::OmpStylizedDeclaration &x) { + static llvm::StringMap<Symbol::Flag> map{ + {"omp_in", Symbol::Flag::OmpInVar}, + {"omp_orig", Symbol::Flag::OmpOrigVar}, + {"omp_out", Symbol::Flag::OmpOutVar}, + {"omp_priv", Symbol::Flag::OmpPrivVar}, + }; + if (auto &name{std::get<parser::ObjectName>(x.var.t)}; name.symbol) { + if (auto found{map.find(name.ToString())}; found != map.end()) { + ResolveOmp(name, found->second, + const_cast<Scope &>(DEREF(name.symbol).owner())); + } + } + return false; + } bool Pre(const parser::OmpMetadirectiveDirective &x) { PushContext(x.v.source, llvm::omp::Directive::OMPD_metadirective); return true; diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp index 93faba7..0e6d4c7 100644 --- a/flang/lib/Semantics/resolve-names.cpp +++ b/flang/lib/Semantics/resolve-names.cpp @@ -1605,6 +1605,12 @@ public: Post(static_cast<const parser::OmpDirectiveSpecification &>(x)); } + void Post(const parser::OmpTypeName &); + bool Pre(const parser::OmpStylizedDeclaration &); + void Post(const parser::OmpStylizedDeclaration &); + bool Pre(const parser::OmpStylizedInstance &); + void Post(const parser::OmpStylizedInstance &); + bool Pre(const parser::OpenMPDeclareMapperConstruct &x) { AddOmpSourceRange(x.source); return true; @@ -1615,18 +1621,6 @@ public: return true; } - bool Pre(const parser::OmpInitializerProc &x) { - auto &procDes = std::get<parser::ProcedureDesignator>(x.t); - auto &name = std::get<parser::Name>(procDes.u); - auto *symbol{FindSymbol(NonDerivedTypeScope(), name)}; - if (!symbol) { - context().Say(name.source, - "Implicit subroutine declaration '%s' in DECLARE REDUCTION"_err_en_US, - name.source); - } - return true; - } - bool Pre(const parser::OmpDeclareVariantDirective &x) { AddOmpSourceRange(x.source); return true; @@ -1772,14 +1766,6 @@ public: messageHandler().set_currStmtSource(std::nullopt); } - bool Pre(const parser::OmpTypeName &x) { - BeginDeclTypeSpec(); - return true; - } - void Post(const parser::OmpTypeName &x) { // - EndDeclTypeSpec(); - } - bool Pre(const parser::OpenMPConstruct &x) { // Indicate that the current directive is not a declarative one. 
declaratives_.push_back(nullptr); @@ -1835,6 +1821,30 @@ void OmpVisitor::Post(const parser::OmpBlockConstruct &x) { } } +void OmpVisitor::Post(const parser::OmpTypeName &x) { + x.declTypeSpec = GetDeclTypeSpec(); +} + +bool OmpVisitor::Pre(const parser::OmpStylizedDeclaration &x) { + BeginDecl(); + Walk(x.type.get()); + Walk(x.var); + return true; +} + +void OmpVisitor::Post(const parser::OmpStylizedDeclaration &x) { // + EndDecl(); +} + +bool OmpVisitor::Pre(const parser::OmpStylizedInstance &x) { + PushScope(Scope::Kind::OtherConstruct, nullptr); + return true; +} + +void OmpVisitor::Post(const parser::OmpStylizedInstance &x) { // + PopScope(); +} + bool OmpVisitor::Pre(const parser::OmpMapClause &x) { auto &mods{OmpGetModifiers(x)}; if (auto *mapper{OmpGetUniqueModifier<parser::OmpMapper>(mods)}) { @@ -1969,51 +1979,20 @@ void OmpVisitor::ProcessReductionSpecifier( } } - auto &typeList{std::get<parser::OmpTypeNameList>(spec.t)}; - - // Create a temporary variable declaration for the four variables - // used in the reduction specifier and initializer (omp_out, omp_in, - // omp_priv and omp_orig), with the type in the typeList. - // - // In theory it would be possible to create only variables that are - // actually used, but that requires walking the entire parse-tree of the - // expressions, and finding the relevant variables [there may well be other - // variables involved too]. - // - // This allows doing semantic analysis where the type is a derived type - // e.g omp_out%x = omp_out%x + omp_in%x. - // - // These need to be temporary (in their own scope). If they are created - // as variables in the outer scope, if there's more than one type in the - // typelist, duplicate symbols will be reported. - const parser::CharBlock ompVarNames[]{ - {"omp_in", 6}, {"omp_out", 7}, {"omp_priv", 8}, {"omp_orig", 8}}; - - for (auto &t : typeList.v) { - PushScope(Scope::Kind::OtherConstruct, nullptr); - BeginDeclTypeSpec(); - // We need to walk t.u because Walk(t) does it's own BeginDeclTypeSpec. - Walk(t.u); + reductionDetails->AddDecl(declaratives_.back()); - // Only process types we can find. There will be an error later on when - // a type isn't found. - if (const DeclTypeSpec *typeSpec{GetDeclTypeSpec()}) { - reductionDetails->AddType(*typeSpec); + // Do not walk OmpTypeNameList. The types on the list will be visited + // during processing of OmpCombinerExpression. + Walk(std::get<std::optional<parser::OmpCombinerExpression>>(spec.t)); + Walk(clauses); - for (auto &nm : ompVarNames) { - ObjectEntityDetails details{}; - details.set_type(*typeSpec); - MakeSymbol(nm, Attrs{}, std::move(details)); - } + for (auto &type : std::get<parser::OmpTypeNameList>(spec.t).v) { + // The declTypeSpec can be null if there is some semantic error. + if (type.declTypeSpec) { + reductionDetails->AddType(*type.declTypeSpec); } - EndDeclTypeSpec(); - Walk(std::get<std::optional<parser::OmpCombinerExpression>>(spec.t)); - Walk(clauses); - PopScope(); } - reductionDetails->AddDecl(declaratives_.back()); - if (!symbol) { symbol = &MakeSymbol(mangledName, Attrs{}, std::move(*reductionDetails)); } diff --git a/flang/module/cudadevice.f90 b/flang/module/cudadevice.f90 index 5182950..59af58d 100644 --- a/flang/module/cudadevice.f90 +++ b/flang/module/cudadevice.f90 @@ -1998,6 +1998,18 @@ implicit none ! 
TMA Operations + interface barrier_arrive + attributes(device) function barrier_arrive(barrier) result(token) + integer(8), shared :: barrier + integer(8) :: token + end function + attributes(device) function barrier_arrive_cnt(barrier, count) result(token) + integer(8), shared :: barrier + integer(4), value :: count + integer(8) :: token + end function + end interface + interface attributes(device) subroutine barrier_init(barrier, count) integer(8), shared :: barrier @@ -2005,15 +2017,18 @@ implicit none end subroutine end interface - interface barrier_arrive - attributes(device) function barrier_arrive(barrier) result(token) + interface + attributes(device) integer function barrier_try_wait(barrier, token) integer(8), shared :: barrier - integer(8) :: token + integer(8), value :: token end function - attributes(device) function barrier_arrive_cnt(barrier, count) result(token) + end interface + + interface + attributes(device) integer function barrier_try_wait_sleep(barrier, token, ns) integer(8), shared :: barrier - integer(4), value :: count - integer(8) :: token + integer(8), value :: token + integer(4), value :: ns end function end interface @@ -2032,7 +2047,13 @@ implicit none end subroutine end interface + ! -------------------- + ! Bulk load functions + ! -------------------- + ! Generic load, count is in bytes + ! ------------------------------- + interface attributes(device) subroutine tma_bulk_g2s(barrier, src, dst, nbytes) !dir$ ignore_tkr src, dst @@ -2043,6 +2064,74 @@ implicit none end subroutine end interface + ! Load specific types, count is in elements + ! ----------------------------------------- + + interface tma_bulk_load + attributes(device) subroutine tma_bulk_ldc4(barrier, src, dst, nelems) + !dir$ ignore_tkr (r) src, (r) dst + integer(8), shared :: barrier + complex(4), device :: src(*) + complex(4), shared :: dst(*) + integer(4), value :: nelems + end subroutine + + attributes(device) subroutine tma_bulk_ldc8(barrier, src, dst, nelems) + !dir$ ignore_tkr (r) src, (r) dst + integer(8), shared :: barrier + complex(8), device :: src(*) + complex(8), shared :: dst(*) + integer(4), value :: nelems + end subroutine + + attributes(device) subroutine tma_bulk_ldi4(barrier, src, dst, nelems) + !dir$ ignore_tkr (r) src, (r) dst + integer(8), shared :: barrier + integer(4), device :: src(*) + integer(4), shared :: dst(*) + integer(4), value :: nelems + end subroutine + + attributes(device) subroutine tma_bulk_ldi8(barrier, src, dst, nelems) + !dir$ ignore_tkr (r) src, (r) dst + integer(8), shared :: barrier + integer(8), device :: src(*) + integer(8), shared :: dst(*) + integer(4), value :: nelems + end subroutine + + attributes(device) subroutine tma_bulk_ldr2(barrier, src, dst, nelems) + !dir$ ignore_tkr (r) src, (r) dst + integer(8), shared :: barrier + real(2), device :: src(*) + real(2), shared :: dst(*) + integer(4), value :: nelems + end subroutine + + attributes(device) subroutine tma_bulk_ldr4(barrier, src, dst, nelems) + !dir$ ignore_tkr (r) src, (r) dst + integer(8), shared :: barrier + real(4), device :: src(*) + real(4), shared :: dst(*) + integer(4), value :: nelems + end subroutine + + attributes(device) subroutine tma_bulk_ldr8(barrier, src, dst, nelems) + !dir$ ignore_tkr (r) src, (r) dst + integer(8), shared :: barrier + real(8), device :: src(*) + real(8), shared :: dst(*) + integer(4), value :: nelems + end subroutine + end interface + + ! -------------------- + ! Bulk Store functions + ! -------------------- + + ! Generic store, count is in bytes + ! 
-------------------------------- + interface attributes(device) subroutine tma_bulk_s2g(src, dst, nbytes) !dir$ ignore_tkr src, dst @@ -2052,6 +2141,60 @@ implicit none end subroutine end interface + ! Store specific types, count is in elements + ! ------------------------------------------ + + interface tma_bulk_store + attributes(device) subroutine tma_bulk_store_c4(src, dst, nelems) + !dir$ ignore_tkr (r) src, (r) dst + complex(4), shared :: src(*) + complex(4), device :: dst(*) + integer(4), value :: nelems + end subroutine + + attributes(device) subroutine tma_bulk_store_c8(src, dst, nelems) + !dir$ ignore_tkr (r) src, (r) dst + complex(8), shared :: src(*) + complex(8), device :: dst(*) + integer(4), value :: nelems + end subroutine + + attributes(device) subroutine tma_bulk_store_i4(src, dst, nelems) + !dir$ ignore_tkr (r) src, (r) dst + integer(4), shared :: src(*) + integer(4), device :: dst(*) + integer(4), value :: nelems + end subroutine + + attributes(device) subroutine tma_bulk_store_i8(src, dst, nelems) + !dir$ ignore_tkr (r) src, (r) dst + integer(8), shared :: src(*) + integer(8), device :: dst(*) + integer(4), value :: nelems + end subroutine + + attributes(device) subroutine tma_bulk_store_r2(src, dst, nelems) + !dir$ ignore_tkr (r) src, (r) dst + real(2), shared :: src(*) + real(2), device :: dst(*) + integer(4), value :: nelems + end subroutine + + attributes(device) subroutine tma_bulk_store_r4(src, dst, nelems) + !dir$ ignore_tkr (r) src, (r) dst + real(4), shared :: src(*) + real(4), device :: dst(*) + integer(4), value :: nelems + end subroutine + + attributes(device) subroutine tma_bulk_store_r8(src, dst, nelems) + !dir$ ignore_tkr (r) src, (r) dst + real(8), shared :: src(*) + real(8), device :: dst(*) + integer(4), value :: nelems + end subroutine + end interface + contains attributes(device) subroutine syncthreads() diff --git a/flang/test/Fir/CUDA/cuda-target-rewrite.mlir b/flang/test/Fir/CUDA/cuda-target-rewrite.mlir index 48fee10..5562e00 100644 --- a/flang/test/Fir/CUDA/cuda-target-rewrite.mlir +++ b/flang/test/Fir/CUDA/cuda-target-rewrite.mlir @@ -108,3 +108,23 @@ module attributes {gpu.container_module, fir.defaultkind = "a1c4d8i4l4r4", fir.k } } +// ----- + +module attributes {gpu.container_module, fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", llvm.target_triple = "x86_64-unknown-linux-gnu"} { + gpu.module @testmod { + gpu.func @_QPtest(%arg0: complex<f32>) -> () kernel { + gpu.return + } + } + func.func @main(%arg0: complex<f32>) { + %0 = llvm.mlir.constant(0 : i64) : i64 + %1 = llvm.mlir.constant(0 : i32) : i32 + %2 = fir.alloca i64 + %3 = cuf.stream_cast %2 : !fir.ref<i64> + %4 = gpu.launch_func async [%3] @testmod::@_QPtest blocks in (%0, %0, %0) threads in (%0, %0, %0) : i64 dynamic_shared_memory_size %1 args(%arg0 : complex<f32>) {cuf.proc_attr = #cuf.cuda_proc<global>} + return + } +} + +// CHECK-LABEL: func.func @main +// CHECK: %{{.*}} = gpu.launch_func async [%{{.*}}] @testmod::@_QPtest blocks in (%{{.*}}, %{{.*}}, %{{.*}}) threads in (%{{.*}}, %{{.*}}, %{{.*}}) : i64 dynamic_shared_memory_size %{{.*}} args(%{{.*}} : !fir.vector<2:f32>) {cuf.proc_attr = #cuf.cuda_proc<global>} diff --git a/flang/test/Lower/CUDA/cuda-device-proc.cuf b/flang/test/Lower/CUDA/cuda-device-proc.cuf index 5c4c3c6..8f35521 100644 --- a/flang/test/Lower/CUDA/cuda-device-proc.cuf +++ b/flang/test/Lower/CUDA/cuda-device-proc.cuf @@ -479,6 +479,8 @@ end subroutine ! 
CHECK-LABEL: func.func @_QPtest_bulk_s2g ! CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.cp.async.bulk.wait_group 0 attributes(device) subroutine testAtomicCasLoop(aa, n) integer :: a @@ -492,3 +494,250 @@ end subroutine ! CHECK: %[[CASTED_CMP_XCHG_EV:.*]] = fir.convert %[[CMP_XCHG_EV]] : (i1) -> i32 ! CHECK: %{{.*}} = arith.constant 1 : i32 ! CHECK: %19 = arith.cmpi eq, %[[CASTED_CMP_XCHG_EV]], %{{.*}} : i32 + +attributes(global) subroutine test_barrier_try_wait() + integer :: istat + integer(8), shared :: barrier1 + integer(8) :: token + istat = barrier_try_wait(barrier1, token) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_barrier_try_wait() +! CHECK: scf.while +! CHECK: %{{.*}} = nvvm.inline_ptx ".reg .pred p; mbarrier.try_wait.shared.b64 p, [%{{.*}}], %{{.*}}, %{{.*}}; selp.b32 %{{.*}}, 1, 0, p;" ro(%{{.*}}, %{{.*}}, %c1000000{{.*}} : !llvm.ptr, i64, i32) -> i32 + +attributes(global) subroutine test_barrier_try_wait_sleep() + integer :: istat + integer(8), shared :: barrier1 + integer(8) :: token + integer(4) :: sleep_time + istat = barrier_try_wait_sleep(barrier1, token, sleep_time) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_barrier_try_wait_sleep() +! CHECK: %{{.*}} = nvvm.inline_ptx ".reg .pred p; mbarrier.try_wait.shared.b64 p, [%{{.*}}], %{{.*}}, %{{.*}}; selp.b32 %0, 1, 0, p;" ro(%{{.*}}, %{{.*}}, %{{.*}} : !llvm.ptr, i64, i32) -> i32 + +attributes(global) subroutine test_tma_bulk_load_c4(a, n) + integer(8), shared :: barrier1 + integer, value :: n + complex(4), device :: r8(n) + complex(4), shared :: tmp(1024) + integer(4) :: j, elem_count + call tma_bulk_load(barrier1, r8(j), tmp, elem_count) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_tma_bulk_load_c4 +! CHECK: %[[BARRIER:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda<shared>, uniq_name = "_QFtest_tma_bulk_load_c4Ebarrier1"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>) +! CHECK: %[[ELEM_COUNT:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda<device>, uniq_name = "_QFtest_tma_bulk_load_c4Eelem_count"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>) +! CHECK: %[[COUNT:.*]] = fir.load %[[ELEM_COUNT]]#0 : !fir.ref<i32> +! CHECK: %[[ELEM_SIZE:.*]] = arith.constant 8 : i32 +! CHECK: %[[SIZE:.*]] = arith.muli %[[COUNT]], %[[ELEM_SIZE]] : i32 +! CHECK: %[[BARRIER_PTR:.*]] = fir.convert %[[BARRIER]]#0 : (!fir.ref<i64>) -> !llvm.ptr +! CHECK: nvvm.inline_ptx "cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%0], [%1], %2, [%3];" ro(%{{.*}}, %{{.*}}, %[[SIZE]], %[[BARRIER_PTR]] : !fir.ref<!fir.array<1024xcomplex<f32>>>, !fir.ref<complex<f32>>, i32, !llvm.ptr) +! CHECK: nvvm.inline_ptx "mbarrier.expect_tx.relaxed.cta.shared::cta.b64 [%0], %1;" ro(%[[BARRIER_PTR]], %[[SIZE]] : !llvm.ptr, i32) + +attributes(global) subroutine test_tma_bulk_load_c8(a, n) + integer(8), shared :: barrier1 + integer, value :: n + complex(8), device :: r8(n) + complex(8), shared :: tmp(1024) + integer(4) :: j, elem_count + call tma_bulk_load(barrier1, r8(j), tmp, elem_count) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_tma_bulk_load_c8 +! CHECK: %[[BARRIER:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda<shared>, uniq_name = "_QFtest_tma_bulk_load_c8Ebarrier1"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>) +! 
CHECK: %[[ELEM_COUNT:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda<device>, uniq_name = "_QFtest_tma_bulk_load_c8Eelem_count"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>) +! CHECK: %[[COUNT:.*]] = fir.load %[[ELEM_COUNT]]#0 : !fir.ref<i32> +! CHECK: %[[ELEM_SIZE:.*]] = arith.constant 16 : i32 +! CHECK: %[[SIZE:.*]] = arith.muli %[[COUNT]], %[[ELEM_SIZE]] : i32 +! CHECK: %[[BARRIER_PTR:.*]] = fir.convert %[[BARRIER]]#0 : (!fir.ref<i64>) -> !llvm.ptr +! CHECK: nvvm.inline_ptx "cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%0], [%1], %2, [%3];" ro(%{{.*}}, %{{.*}}, %[[SIZE]], %[[BARRIER_PTR]] : !fir.ref<!fir.array<1024xcomplex<f64>>>, !fir.ref<complex<f64>>, i32, !llvm.ptr) +! CHECK: nvvm.inline_ptx "mbarrier.expect_tx.relaxed.cta.shared::cta.b64 [%0], %1;" ro(%[[BARRIER_PTR]], %[[SIZE]] : !llvm.ptr, i32) + +attributes(global) subroutine test_tma_bulk_load_i4(a, n) + integer(8), shared :: barrier1 + integer, value :: n + integer(4), device :: r8(n) + integer(4), shared :: tmp(1024) + integer(4) :: j, elem_count + call tma_bulk_load(barrier1, r8(j), tmp, elem_count) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_tma_bulk_load_i4 +! CHECK: %[[BARRIER:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda<shared>, uniq_name = "_QFtest_tma_bulk_load_i4Ebarrier1"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>) +! CHECK: %[[ELEM_COUNT:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda<device>, uniq_name = "_QFtest_tma_bulk_load_i4Eelem_count"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>) +! CHECK: %[[COUNT:.*]] = fir.load %[[ELEM_COUNT]]#0 : !fir.ref<i32> +! CHECK: %[[ELEM_SIZE:.*]] = arith.constant 4 : i32 +! CHECK: %[[SIZE:.*]] = arith.muli %[[COUNT]], %[[ELEM_SIZE]] : i32 +! CHECK: %[[BARRIER_PTR:.*]] = fir.convert %[[BARRIER]]#0 : (!fir.ref<i64>) -> !llvm.ptr +! CHECK: nvvm.inline_ptx "cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%0], [%1], %2, [%3];" ro(%{{.*}}, %{{.*}}, %[[SIZE]], %[[BARRIER_PTR]] : !fir.ref<!fir.array<1024xi32>>, !fir.ref<i32>, i32, !llvm.ptr) +! CHECK: nvvm.inline_ptx "mbarrier.expect_tx.relaxed.cta.shared::cta.b64 [%0], %1;" ro(%[[BARRIER_PTR]], %[[SIZE]] : !llvm.ptr, i32) + +attributes(global) subroutine test_tma_bulk_load_i8(a, n) + integer(8), shared :: barrier1 + integer, value :: n + integer(8), device :: r8(n) + integer(8), shared :: tmp(1024) + integer(4) :: j, elem_count + call tma_bulk_load(barrier1, r8(j), tmp, elem_count) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_tma_bulk_load_i8 +! CHECK: %[[BARRIER:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda<shared>, uniq_name = "_QFtest_tma_bulk_load_i8Ebarrier1"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>) +! CHECK: %[[ELEM_COUNT:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda<device>, uniq_name = "_QFtest_tma_bulk_load_i8Eelem_count"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>) +! CHECK: %[[COUNT:.*]] = fir.load %[[ELEM_COUNT]]#0 : !fir.ref<i32> +! CHECK: %[[ELEM_SIZE:.*]] = arith.constant 8 : i32 +! CHECK: %[[SIZE:.*]] = arith.muli %[[COUNT]], %[[ELEM_SIZE]] : i32 +! CHECK: %[[BARRIER_PTR:.*]] = fir.convert %[[BARRIER]]#0 : (!fir.ref<i64>) -> !llvm.ptr +! CHECK: nvvm.inline_ptx "cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%0], [%1], %2, [%3];" ro(%{{.*}}, %{{.*}}, %[[SIZE]], %[[BARRIER_PTR]] : !fir.ref<!fir.array<1024xi64>>, !fir.ref<i64>, i32, !llvm.ptr) +! 
CHECK: nvvm.inline_ptx "mbarrier.expect_tx.relaxed.cta.shared::cta.b64 [%0], %1;" ro(%[[BARRIER_PTR]], %[[SIZE]] : !llvm.ptr, i32) + +attributes(global) subroutine test_tma_bulk_load_r2(a, n) + integer(8), shared :: barrier1 + integer, value :: n + real(2), device :: r8(n) + real(2), shared :: tmp(1024) + integer(4) :: j, elem_count + call tma_bulk_load(barrier1, r8(j), tmp, elem_count) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_tma_bulk_load_r2 +! CHECK: %[[BARRIER:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda<shared>, uniq_name = "_QFtest_tma_bulk_load_r2Ebarrier1"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>) +! CHECK: %[[ELEM_COUNT:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda<device>, uniq_name = "_QFtest_tma_bulk_load_r2Eelem_count"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>) +! CHECK: %[[COUNT:.*]] = fir.load %[[ELEM_COUNT]]#0 : !fir.ref<i32> +! CHECK: %[[ELEM_SIZE:.*]] = arith.constant 2 : i32 +! CHECK: %[[SIZE:.*]] = arith.muli %[[COUNT]], %[[ELEM_SIZE]] : i32 +! CHECK: %[[BARRIER_PTR:.*]] = fir.convert %[[BARRIER]]#0 : (!fir.ref<i64>) -> !llvm.ptr +! CHECK: nvvm.inline_ptx "cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%0], [%1], %2, [%3];" ro(%{{.*}}, %{{.*}}, %[[SIZE]], %[[BARRIER_PTR]] : !fir.ref<!fir.array<1024xf16>>, !fir.ref<f16>, i32, !llvm.ptr) +! CHECK: nvvm.inline_ptx "mbarrier.expect_tx.relaxed.cta.shared::cta.b64 [%0], %1;" ro(%[[BARRIER_PTR]], %[[SIZE]] : !llvm.ptr, i32) + +attributes(global) subroutine test_tma_bulk_load_r4(a, n) + integer(8), shared :: barrier1 + integer, value :: n + real(4), device :: r8(n) + real(4), shared :: tmp(1024) + integer(4) :: j, elem_count + call tma_bulk_load(barrier1, r8(j), tmp, elem_count) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_tma_bulk_load_r4 +! CHECK: %[[BARRIER:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda<shared>, uniq_name = "_QFtest_tma_bulk_load_r4Ebarrier1"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>) +! CHECK: %[[ELEM_COUNT:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda<device>, uniq_name = "_QFtest_tma_bulk_load_r4Eelem_count"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>) +! CHECK: %[[COUNT:.*]] = fir.load %[[ELEM_COUNT]]#0 : !fir.ref<i32> +! CHECK: %[[ELEM_SIZE:.*]] = arith.constant 4 : i32 +! CHECK: %[[SIZE:.*]] = arith.muli %[[COUNT]], %[[ELEM_SIZE]] : i32 +! CHECK: %[[BARRIER_PTR:.*]] = fir.convert %[[BARRIER]]#0 : (!fir.ref<i64>) -> !llvm.ptr +! CHECK: nvvm.inline_ptx "cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%0], [%1], %2, [%3];" ro(%{{.*}}, %{{.*}}, %[[SIZE]], %[[BARRIER_PTR]] : !fir.ref<!fir.array<1024xf32>>, !fir.ref<f32>, i32, !llvm.ptr) +! CHECK: nvvm.inline_ptx "mbarrier.expect_tx.relaxed.cta.shared::cta.b64 [%0], %1;" ro(%[[BARRIER_PTR]], %[[SIZE]] : !llvm.ptr, i32) + +attributes(global) subroutine test_tma_bulk_load_r8(a, n) + integer(8), shared :: barrier1 + integer, value :: n + real(8), device :: r8(n) + real(8), shared :: tmp(1024) + integer(4) :: j, elem_count + call tma_bulk_load(barrier1, r8(j), tmp, elem_count) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_tma_bulk_load_r8 +! CHECK: %[[BARRIER:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda<shared>, uniq_name = "_QFtest_tma_bulk_load_r8Ebarrier1"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>) +! CHECK: %[[ELEM_COUNT:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda<device>, uniq_name = "_QFtest_tma_bulk_load_r8Eelem_count"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>) +! 
CHECK: %[[COUNT:.*]] = fir.load %[[ELEM_COUNT]]#0 : !fir.ref<i32> +! CHECK: %[[ELEM_SIZE:.*]] = arith.constant 8 : i32 +! CHECK: %[[SIZE:.*]] = arith.muli %[[COUNT]], %[[ELEM_SIZE]] : i32 +! CHECK: %[[BARRIER_PTR:.*]] = fir.convert %[[BARRIER]]#0 : (!fir.ref<i64>) -> !llvm.ptr +! CHECK: nvvm.inline_ptx "cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%0], [%1], %2, [%3];" ro(%{{.*}}, %{{.*}}, %[[SIZE]], %[[BARRIER_PTR]] : !fir.ref<!fir.array<1024xf64>>, !fir.ref<f64>, i32, !llvm.ptr) +! CHECK: nvvm.inline_ptx "mbarrier.expect_tx.relaxed.cta.shared::cta.b64 [%0], %1;" ro(%[[BARRIER_PTR]], %[[SIZE]] : !llvm.ptr, i32) + +attributes(global) subroutine test_tma_bulk_store_c4(c, n) + integer, value :: n + complex(4), device :: c(n) + complex(4), shared :: tmpa(1024) + integer(4) :: j, elem_count + call tma_bulk_store(tmpa, c(j), elem_count) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_tma_bulk_store_c4 +! CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.cp.async.bulk.wait_group 0 + +attributes(global) subroutine test_tma_bulk_store_c8(c, n) + integer, value :: n + complex(8), device :: c(n) + complex(8), shared :: tmpa(1024) + integer(4) :: j, elem_count + call tma_bulk_store(tmpa, c(j), elem_count) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_tma_bulk_store_c8 +! CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.cp.async.bulk.wait_group 0 + +attributes(global) subroutine test_tma_bulk_store_i4(c, n) + integer, value :: n + integer(4), device :: c(n) + integer(4), shared :: tmpa(1024) + integer(4) :: j, elem_count + call tma_bulk_store(tmpa, c(j), elem_count) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_tma_bulk_store_i4 +! CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.cp.async.bulk.wait_group 0 + +attributes(global) subroutine test_tma_bulk_store_i8(c, n) + integer, value :: n + integer(8), device :: c(n) + integer(8), shared :: tmpa(1024) + integer(4) :: j, elem_count + call tma_bulk_store(tmpa, c(j), elem_count) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_tma_bulk_store_i8 +! CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.cp.async.bulk.wait_group 0 + + +attributes(global) subroutine test_tma_bulk_store_r2(c, n) + integer, value :: n + real(2), device :: c(n) + real(2), shared :: tmpa(1024) + integer(4) :: j, elem_count + call tma_bulk_store(tmpa, c(j), elem_count) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_tma_bulk_store_r2 +! CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.cp.async.bulk.wait_group 0 + +attributes(global) subroutine test_tma_bulk_store_r4(c, n) + integer, value :: n + real(4), device :: c(n) + real(4), shared :: tmpa(1024) + integer(4) :: j, elem_count + call tma_bulk_store(tmpa, c(j), elem_count) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_tma_bulk_store_r4 +! CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! 
CHECK: nvvm.cp.async.bulk.wait_group 0 + +attributes(global) subroutine test_tma_bulk_store_r8(c, n) + integer, value :: n + real(8), device :: c(n) + real(8), shared :: tmpa(1024) + integer(4) :: j, elem_count + call tma_bulk_store(tmpa, c(j), elem_count) +end subroutine + +! CHECK-LABEL: func.func @_QPtest_tma_bulk_store_r8 +! CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.cp.async.bulk.wait_group 0 diff --git a/flang/test/Parser/OpenMP/declare-reduction-multi.f90 b/flang/test/Parser/OpenMP/declare-reduction-multi.f90 index a682958..8856661 100644 --- a/flang/test/Parser/OpenMP/declare-reduction-multi.f90 +++ b/flang/test/Parser/OpenMP/declare-reduction-multi.f90 @@ -26,7 +26,8 @@ program omp_examples type(tt) :: values(n), sum, prod, big, small !$omp declare reduction(+:tt:omp_out%r = omp_out%r + omp_in%r) initializer(omp_priv%r = 0) -!CHECK: !$OMP DECLARE REDUCTION(+:tt: omp_out%r = omp_out%r+omp_in%r) INITIALIZER(omp_priv%r = 0_4) +!CHECK: !$OMP DECLARE REDUCTION(+:tt: omp_out%r = omp_out%r + omp_in%r) INITIALIZER(om& +!CHECK-NEXT: !$OMP&p_priv%r = 0) !PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction @@ -34,11 +35,39 @@ program omp_examples !PARSE-TREE: | | OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add !PARSE-TREE: | | OmpTypeNameList -> OmpTypeName -> TypeSpec -> DerivedTypeSpec !PARSE-TREE: | | | Name = 'tt' -!PARSE-TREE: | | OmpCombinerExpression -> AssignmentStmt = 'omp_out%r=omp_out%r+omp_in%r' -!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv%r=0._4' +!PARSE-TREE: | | OmpCombinerExpression -> OmpStylizedInstance +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | Instance -> AssignmentStmt = 'omp_out%r=omp_out%r+omp_in%r' +!PARSE-TREE: | | | | Variable = 'omp_out%r' +!PARSE-TREE: | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | Name = 'r' +!PARSE-TREE: | | | | Expr = 'omp_out%r+omp_in%r' +!PARSE-TREE: | | | | | Add +!PARSE-TREE: | | | | | | Expr = 'omp_out%r' +!PARSE-TREE: | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | | | Name = 'r' +!PARSE-TREE: | | | | | | Expr = 'omp_in%r' +!PARSE-TREE: | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | DataRef -> Name = 'omp_in' +!PARSE-TREE: | | | | | | | | Name = 'r' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> OmpInitializerExpression -> OmpStylizedInstance +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | Instance -> AssignmentStmt = 'omp_priv%r=0._4' +!PARSE-TREE: | | | Variable = 'omp_priv%r' +!PARSE-TREE: | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | DataRef -> Name = 'omp_priv' +!PARSE-TREE: | | | | | Name = 'r' +!PARSE-TREE: | | | Expr = '0_4' +!PARSE-TREE: | | | | LiteralConstant -> IntLiteralConstant = '0' +!PARSE-TREE: | Flags = None !$omp declare reduction(*:tt:omp_out%r = omp_out%r * omp_in%r) initializer(omp_priv%r = 1) -!CHECK-NEXT: !$OMP DECLARE REDUCTION(*:tt: omp_out%r = 
omp_out%r*omp_in%r) INITIALIZER(omp_priv%r = 1_4) +!CHECK-NEXT: !$OMP DECLARE REDUCTION(*:tt: omp_out%r = omp_out%r * omp_in%r) INITIALIZER(om& +!CHECK-NEXT: !$OMP&p_priv%r = 1) !PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction @@ -46,11 +75,39 @@ program omp_examples !PARSE-TREE: | | OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Multiply !PARSE-TREE: | | OmpTypeNameList -> OmpTypeName -> TypeSpec -> DerivedTypeSpec !PARSE-TREE: | | | Name = 'tt' -!PARSE-TREE: | | OmpCombinerExpression -> AssignmentStmt = 'omp_out%r=omp_out%r*omp_in%r' -!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv%r=1._4' +!PARSE-TREE: | | OmpCombinerExpression -> OmpStylizedInstance +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | Instance -> AssignmentStmt = 'omp_out%r=omp_out%r*omp_in%r' +!PARSE-TREE: | | | | Variable = 'omp_out%r' +!PARSE-TREE: | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | Name = 'r' +!PARSE-TREE: | | | | Expr = 'omp_out%r*omp_in%r' +!PARSE-TREE: | | | | | Multiply +!PARSE-TREE: | | | | | | Expr = 'omp_out%r' +!PARSE-TREE: | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | | | Name = 'r' +!PARSE-TREE: | | | | | | Expr = 'omp_in%r' +!PARSE-TREE: | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | DataRef -> Name = 'omp_in' +!PARSE-TREE: | | | | | | | | Name = 'r' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> OmpInitializerExpression -> OmpStylizedInstance +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | Instance -> AssignmentStmt = 'omp_priv%r=1._4' +!PARSE-TREE: | | | Variable = 'omp_priv%r' +!PARSE-TREE: | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | DataRef -> Name = 'omp_priv' +!PARSE-TREE: | | | | | Name = 'r' +!PARSE-TREE: | | | Expr = '1_4' +!PARSE-TREE: | | | | LiteralConstant -> IntLiteralConstant = '1' +!PARSE-TREE: | Flags = None !$omp declare reduction(max:tt:omp_out = mymax(omp_out, omp_in)) initializer(omp_priv%r = 0) -!CHECK-NEXT: !$OMP DECLARE REDUCTION(max:tt: omp_out = mymax(omp_out,omp_in)) INITIALIZER(omp_priv%r = 0_4) +!CHECK-NEXT: !$OMP DECLARE REDUCTION(max:tt: omp_out = mymax(omp_out, omp_in)) INITIALIZER(& +!CHECK-NEXT: !$OMP&omp_priv%r = 0) !PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction @@ -58,11 +115,36 @@ program omp_examples !PARSE-TREE: | | OmpReductionIdentifier -> ProcedureDesignator -> Name = 'max' !PARSE-TREE: | | OmpTypeNameList -> OmpTypeName -> TypeSpec -> DerivedTypeSpec !PARSE-TREE: | | | Name = 'tt' -!PARSE-TREE: | | OmpCombinerExpression -> AssignmentStmt = 'omp_out=mymax(omp_out,omp_in)' -!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv%r=0._4' +!PARSE-TREE: | | OmpCombinerExpression -> OmpStylizedInstance +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | 
OmpStylizedDeclaration +!PARSE-TREE: | | | Instance -> AssignmentStmt = 'omp_out=mymax(omp_out,omp_in)' +!PARSE-TREE: | | | | Variable = 'omp_out' +!PARSE-TREE: | | | | | Designator -> DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | Expr = 'mymax(omp_out,omp_in)' +!PARSE-TREE: | | | | | FunctionReference -> Call +!PARSE-TREE: | | | | | | ProcedureDesignator -> Name = 'mymax' +!PARSE-TREE: | | | | | | ActualArgSpec +!PARSE-TREE: | | | | | | | ActualArg -> Expr = 'omp_out' +!PARSE-TREE: | | | | | | | | Designator -> DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | ActualArgSpec +!PARSE-TREE: | | | | | | | ActualArg -> Expr = 'omp_in' +!PARSE-TREE: | | | | | | | | Designator -> DataRef -> Name = 'omp_in' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> OmpInitializerExpression -> OmpStylizedInstance +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | Instance -> AssignmentStmt = 'omp_priv%r=0._4' +!PARSE-TREE: | | | Variable = 'omp_priv%r' +!PARSE-TREE: | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | DataRef -> Name = 'omp_priv' +!PARSE-TREE: | | | | | Name = 'r' +!PARSE-TREE: | | | Expr = '0_4' +!PARSE-TREE: | | | | LiteralConstant -> IntLiteralConstant = '0' +!PARSE-TREE: | Flags = None !$omp declare reduction(min:tt:omp_out%r = min(omp_out%r, omp_in%r)) initializer(omp_priv%r = 1) -!CHECK-NEXT: !$OMP DECLARE REDUCTION(min:tt: omp_out%r = min(omp_out%r,omp_in%r)) INITIALIZER(omp_priv%r = 1_4) +!CHECK-NEXT: !$OMP DECLARE REDUCTION(min:tt: omp_out%r = min(omp_out%r, omp_in%r)) INITIALI& +!CHECK-NEXT: !$OMP&ZER(omp_priv%r = 1) !PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction @@ -70,8 +152,38 @@ program omp_examples !PARSE-TREE: | | OmpReductionIdentifier -> ProcedureDesignator -> Name = 'min' !PARSE-TREE: | | OmpTypeNameList -> OmpTypeName -> TypeSpec -> DerivedTypeSpec !PARSE-TREE: | | | Name = 'tt' -!PARSE-TREE: | | OmpCombinerExpression -> AssignmentStmt = 'omp_out%r=min(omp_out%r,omp_in%r)' -!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv%r=1._4' +!PARSE-TREE: | | OmpCombinerExpression -> OmpStylizedInstance +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | Instance -> AssignmentStmt = 'omp_out%r=min(omp_out%r,omp_in%r)' +!PARSE-TREE: | | | | Variable = 'omp_out%r' +!PARSE-TREE: | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | Name = 'r' +!PARSE-TREE: | | | | Expr = 'min(omp_out%r,omp_in%r)' +!PARSE-TREE: | | | | | FunctionReference -> Call +!PARSE-TREE: | | | | | | ProcedureDesignator -> Name = 'min' +!PARSE-TREE: | | | | | | ActualArgSpec +!PARSE-TREE: | | | | | | | ActualArg -> Expr = 'omp_out%r' +!PARSE-TREE: | | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | | | | Name = 'r' +!PARSE-TREE: | | | | | | ActualArgSpec +!PARSE-TREE: | | | | | | | ActualArg -> Expr = 'omp_in%r' +!PARSE-TREE: | | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | | DataRef -> Name = 'omp_in' +!PARSE-TREE: | | | | | | | | | Name = 'r' +!PARSE-TREE: | OmpClauseList -> OmpClause -> 
Initializer -> OmpInitializerClause -> OmpInitializerExpression -> OmpStylizedInstance +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | Instance -> AssignmentStmt = 'omp_priv%r=1._4' +!PARSE-TREE: | | | Variable = 'omp_priv%r' +!PARSE-TREE: | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | DataRef -> Name = 'omp_priv' +!PARSE-TREE: | | | | | Name = 'r' +!PARSE-TREE: | | | Expr = '1_4' +!PARSE-TREE: | | | | LiteralConstant -> IntLiteralConstant = '1' +!PARSE-TREE: | Flags = None call random_number(values%r) diff --git a/flang/test/Parser/OpenMP/declare-reduction-operator.f90 b/flang/test/Parser/OpenMP/declare-reduction-operator.f90 index e4d07c8..0d337c1 100644 --- a/flang/test/Parser/OpenMP/declare-reduction-operator.f90 +++ b/flang/test/Parser/OpenMP/declare-reduction-operator.f90 @@ -16,7 +16,8 @@ subroutine reduce_1 ( n, tts ) type(tt) :: tts(n) type(tt2) :: tts2(n) -!CHECK: !$OMP DECLARE REDUCTION(+:tt: omp_out = tt(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y)) INITIALIZER(omp_priv = tt(x=0_4,y=0_4)) +!CHECK: !$OMP DECLARE REDUCTION(+:tt: omp_out = tt(omp_out%x - omp_in%x , omp_out%y - & +!CHECK: !$OMP&omp_in%y)) INITIALIZER(omp_priv = tt(0,0)) !PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction @@ -24,13 +25,60 @@ subroutine reduce_1 ( n, tts ) !PARSE-TREE: | | OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add !PARSE-TREE: | | OmpTypeNameList -> OmpTypeName -> TypeSpec -> DerivedTypeSpec !PARSE-TREE: | | | Name = 'tt' -!PARSE-TREE: | | OmpCombinerExpression -> AssignmentStmt = 'omp_out=tt(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y)' -!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv=tt(x=0_4,y=0_4)' - +!PARSE-TREE: | | OmpCombinerExpression -> OmpStylizedInstance +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | Instance -> AssignmentStmt = 'omp_out=tt(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y)' +!PARSE-TREE: | | | | Variable = 'omp_out' +!PARSE-TREE: | | | | | Designator -> DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | Expr = 'tt(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y)' +!PARSE-TREE: | | | | | StructureConstructor +!PARSE-TREE: | | | | | | DerivedTypeSpec +!PARSE-TREE: | | | | | | | Name = 'tt' +!PARSE-TREE: | | | | | | ComponentSpec +!PARSE-TREE: | | | | | | | ComponentDataSource -> Expr = 'omp_out%x-omp_in%x' +!PARSE-TREE: | | | | | | | | Subtract +!PARSE-TREE: | | | | | | | | | Expr = 'omp_out%x' +!PARSE-TREE: | | | | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | | | | | | Name = 'x' +!PARSE-TREE: | | | | | | | | | Expr = 'omp_in%x' +!PARSE-TREE: | | | | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | | | | DataRef -> Name = 'omp_in' +!PARSE-TREE: | | | | | | | | | | | Name = 'x' +!PARSE-TREE: | | | | | | ComponentSpec +!PARSE-TREE: | | | | | | | ComponentDataSource -> Expr = 'omp_out%y-omp_in%y' +!PARSE-TREE: | | | | | | | | Subtract +!PARSE-TREE: | | | | | | | | | Expr = 'omp_out%y' +!PARSE-TREE: | | | | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | | | | | 
| Name = 'y' +!PARSE-TREE: | | | | | | | | | Expr = 'omp_in%y' +!PARSE-TREE: | | | | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | | | | DataRef -> Name = 'omp_in' +!PARSE-TREE: | | | | | | | | | | | Name = 'y' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> OmpInitializerExpression -> OmpStylizedInstance +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | Instance -> AssignmentStmt = 'omp_priv=tt(x=0_4,y=0_4)' +!PARSE-TREE: | | | Variable = 'omp_priv' +!PARSE-TREE: | | | | Designator -> DataRef -> Name = 'omp_priv' +!PARSE-TREE: | | | Expr = 'tt(x=0_4,y=0_4)' +!PARSE-TREE: | | | | StructureConstructor +!PARSE-TREE: | | | | | DerivedTypeSpec +!PARSE-TREE: | | | | | | Name = 'tt' +!PARSE-TREE: | | | | | ComponentSpec +!PARSE-TREE: | | | | | | ComponentDataSource -> Expr = '0_4' +!PARSE-TREE: | | | | | | | LiteralConstant -> IntLiteralConstant = '0' +!PARSE-TREE: | | | | | ComponentSpec +!PARSE-TREE: | | | | | | ComponentDataSource -> Expr = '0_4' +!PARSE-TREE: | | | | | | | LiteralConstant -> IntLiteralConstant = '0' +!PARSE-TREE: | Flags = None !$omp declare reduction(+ : tt : omp_out = tt(omp_out%x - omp_in%x , omp_out%y - omp_in%y)) initializer(omp_priv = tt(0,0)) -!CHECK: !$OMP DECLARE REDUCTION(+:tt2: omp_out = tt2(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y)) INITIALIZER(omp_priv = tt2(x=0._8,y=0._8) +!CHECK: !$OMP DECLARE REDUCTION(+:tt2: omp_out = tt2(omp_out%x - omp_in%x , omp_out%y & +!CHECK: !$OMP&- omp_in%y)) INITIALIZER(omp_priv = tt2(0,0)) !PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction @@ -38,9 +86,55 @@ subroutine reduce_1 ( n, tts ) !PARSE-TREE: | | OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add !PARSE-TREE: | | OmpTypeNameList -> OmpTypeName -> TypeSpec -> DerivedTypeSpec !PARSE-TREE: | | | Name = 'tt2' -!PARSE-TREE: | | OmpCombinerExpression -> AssignmentStmt = 'omp_out=tt2(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y)' -!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv=tt2(x=0._8,y=0._8)' - +!PARSE-TREE: | | OmpCombinerExpression -> OmpStylizedInstance +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | Instance -> AssignmentStmt = 'omp_out=tt2(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y)' +!PARSE-TREE: | | | | Variable = 'omp_out' +!PARSE-TREE: | | | | | Designator -> DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | Expr = 'tt2(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y)' +!PARSE-TREE: | | | | | StructureConstructor +!PARSE-TREE: | | | | | | DerivedTypeSpec +!PARSE-TREE: | | | | | | | Name = 'tt2' +!PARSE-TREE: | | | | | | ComponentSpec +!PARSE-TREE: | | | | | | | ComponentDataSource -> Expr = 'omp_out%x-omp_in%x' +!PARSE-TREE: | | | | | | | | Subtract +!PARSE-TREE: | | | | | | | | | Expr = 'omp_out%x' +!PARSE-TREE: | | | | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | | | | | | Name = 'x' +!PARSE-TREE: | | | | | | | | | Expr = 'omp_in%x' +!PARSE-TREE: | | | | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | | | | DataRef -> Name = 'omp_in' +!PARSE-TREE: | | | | | | | | | | | Name = 'x' +!PARSE-TREE: | | | 
| | | ComponentSpec +!PARSE-TREE: | | | | | | | ComponentDataSource -> Expr = 'omp_out%y-omp_in%y' +!PARSE-TREE: | | | | | | | | Subtract +!PARSE-TREE: | | | | | | | | | Expr = 'omp_out%y' +!PARSE-TREE: | | | | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | | | | | | Name = 'y' +!PARSE-TREE: | | | | | | | | | Expr = 'omp_in%y' +!PARSE-TREE: | | | | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | | | | DataRef -> Name = 'omp_in' +!PARSE-TREE: | | | | | | | | | | | Name = 'y' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> OmpInitializerExpression -> OmpStylizedInstance +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | Instance -> AssignmentStmt = 'omp_priv=tt2(x=0._8,y=0._8)' +!PARSE-TREE: | | | Variable = 'omp_priv' +!PARSE-TREE: | | | | Designator -> DataRef -> Name = 'omp_priv' +!PARSE-TREE: | | | Expr = 'tt2(x=0._8,y=0._8)' +!PARSE-TREE: | | | | StructureConstructor +!PARSE-TREE: | | | | | DerivedTypeSpec +!PARSE-TREE: | | | | | | Name = 'tt2' +!PARSE-TREE: | | | | | ComponentSpec +!PARSE-TREE: | | | | | | ComponentDataSource -> Expr = '0_4' +!PARSE-TREE: | | | | | | | LiteralConstant -> IntLiteralConstant = '0' +!PARSE-TREE: | | | | | ComponentSpec +!PARSE-TREE: | | | | | | ComponentDataSource -> Expr = '0_4' +!PARSE-TREE: | | | | | | | LiteralConstant -> IntLiteralConstant = '0' +!PARSE-TREE: | Flags = None !$omp declare reduction(+ :tt2 : omp_out = tt2(omp_out%x - omp_in%x , omp_out%y - omp_in%y)) initializer(omp_priv = tt2(0,0)) type(tt) :: diffp = tt( 0, 0 ) diff --git a/flang/test/Parser/OpenMP/declare-reduction-unparse-with-symbols.f90 b/flang/test/Parser/OpenMP/declare-reduction-unparse-with-symbols.f90 index 455fc17..f026f15 100644 --- a/flang/test/Parser/OpenMP/declare-reduction-unparse-with-symbols.f90 +++ b/flang/test/Parser/OpenMP/declare-reduction-unparse-with-symbols.f90 @@ -8,6 +8,6 @@ end !CHECK: !DEF: /f00 (Subroutine) Subprogram !CHECK: subroutine f00 -!CHECK: !$omp declare reduction(fred:integer,real: omp_out = omp_in+omp_out) +!CHECK: !$omp declare reduction(fred:integer, real: omp_out = omp_in + omp_out) !CHECK: end subroutine diff --git a/flang/test/Parser/OpenMP/declare-reduction-unparse.f90 b/flang/test/Parser/OpenMP/declare-reduction-unparse.f90 index 73d7ccf..7897eb0 100644 --- a/flang/test/Parser/OpenMP/declare-reduction-unparse.f90 +++ b/flang/test/Parser/OpenMP/declare-reduction-unparse.f90 @@ -19,7 +19,8 @@ function func(x, n, init) end subroutine initme end interface !$omp declare reduction(red_add:integer(4):omp_out=omp_out+omp_in) initializer(initme(omp_priv,0)) -!CHECK: !$OMP DECLARE REDUCTION(red_add:INTEGER(KIND=4_4): omp_out = omp_out+omp_in) INITIALIZER(initme(omp_priv, 0_4)) +!CHECK: !$OMP DECLARE REDUCTION(red_add:INTEGER(KIND=4_4): omp_out=omp_out+omp_in) INITIA& +!CHECK: !$OMP&LIZER(initme(omp_priv,0)) !PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification !PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction @@ -27,9 +28,31 @@ function func(x, n, init) !PARSE-TREE: | | OmpReductionIdentifier -> ProcedureDesignator -> Name = 'red_add' !PARSE-TREE: | | OmpTypeNameList -> OmpTypeName -> DeclarationTypeSpec -> IntrinsicTypeSpec -> IntegerTypeSpec -> KindSelector -> Scalar -> Integer -> Constant -> Expr = 
'4_4' !PARSE-TREE: | | | LiteralConstant -> IntLiteralConstant = '4' -!PARSE-TREE: | | OmpCombinerExpression -> AssignmentStmt = 'omp_out=omp_out+omp_in' -!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> OmpInitializerProc -!PARSE-TREE: | | ProcedureDesignator -> Name = 'initme' +!PARSE-TREE: | | OmpCombinerExpression -> OmpStylizedInstance +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | Instance -> AssignmentStmt = 'omp_out=omp_out+omp_in' +!PARSE-TREE: | | | | Variable = 'omp_out' +!PARSE-TREE: | | | | | Designator -> DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | Expr = 'omp_out+omp_in' +!PARSE-TREE: | | | | | Add +!PARSE-TREE: | | | | | | Expr = 'omp_out' +!PARSE-TREE: | | | | | | | Designator -> DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | Expr = 'omp_in' +!PARSE-TREE: | | | | | | | Designator -> DataRef -> Name = 'omp_in' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> OmpInitializerExpression -> OmpStylizedInstance +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | Instance -> CallStmt = 'CALL initme(omp_priv,0_4)' +!PARSE-TREE: | | | Call +!PARSE-TREE: | | | | ProcedureDesignator -> Name = 'initme' +!PARSE-TREE: | | | | ActualArgSpec +!PARSE-TREE: | | | | | ActualArg -> Expr = 'omp_priv' +!PARSE-TREE: | | | | | | Designator -> DataRef -> Name = 'omp_priv' +!PARSE-TREE: | | | | ActualArgSpec +!PARSE-TREE: | | | | | ActualArg -> Expr = '0_4' +!PARSE-TREE: | | | | | | LiteralConstant -> IntLiteralConstant = '0' +!PARSE-TREE: | Flags = None res=init !$omp simd reduction(red_add:res) @@ -59,7 +82,8 @@ end function func !CHECK-LABEL: program main program main integer :: my_var -!CHECK: !$OMP DECLARE REDUCTION(my_add_red:INTEGER: omp_out = omp_out+omp_in) INITIALIZER(omp_priv = 0_4) +!CHECK: !$OMP DECLARE REDUCTION(my_add_red:INTEGER: omp_out = omp_out + omp_in) INITIA& +!CHECK: !$OMP&LIZER(omp_priv=0) !$omp declare reduction (my_add_red : integer : omp_out = omp_out + omp_in) initializer (omp_priv=0) my_var = 0 @@ -74,5 +98,24 @@ end program main !PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpReductionSpecifier !PARSE-TREE: | | OmpReductionIdentifier -> ProcedureDesignator -> Name = 'my_add_red' !PARSE-TREE: | | OmpTypeNameList -> OmpTypeName -> DeclarationTypeSpec -> IntrinsicTypeSpec -> IntegerTypeSpec -> -!PARSE-TREE: | | OmpCombinerExpression -> AssignmentStmt = 'omp_out=omp_out+omp_in' -!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv=0_4' +!PARSE-TREE: | | OmpCombinerExpression -> OmpStylizedInstance +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | Instance -> AssignmentStmt = 'omp_out=omp_out+omp_in' +!PARSE-TREE: | | | | Variable = 'omp_out' +!PARSE-TREE: | | | | | Designator -> DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | Expr = 'omp_out+omp_in' +!PARSE-TREE: | | | | | Add +!PARSE-TREE: | | | | | | Expr = 'omp_out' +!PARSE-TREE: | | | | | | | Designator -> DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | Expr = 'omp_in' +!PARSE-TREE: | | | | | | | Designator -> DataRef -> Name = 'omp_in' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> OmpInitializerExpression -> OmpStylizedInstance +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | OmpStylizedDeclaration +!PARSE-TREE: | | Instance -> AssignmentStmt = 'omp_priv=0_4' 
+!PARSE-TREE: | | | Variable = 'omp_priv' +!PARSE-TREE: | | | | Designator -> DataRef -> Name = 'omp_priv' +!PARSE-TREE: | | | Expr = '0_4' +!PARSE-TREE: | | | | LiteralConstant -> IntLiteralConstant = '0' +!PARSE-TREE: | Flags = None diff --git a/flang/test/Parser/OpenMP/metadirective-dirspec.f90 b/flang/test/Parser/OpenMP/metadirective-dirspec.f90 index c373001..b64ceb1 100644 --- a/flang/test/Parser/OpenMP/metadirective-dirspec.f90 +++ b/flang/test/Parser/OpenMP/metadirective-dirspec.f90 @@ -105,8 +105,8 @@ end !UNPARSE: TYPE :: tt2 !UNPARSE: REAL :: x !UNPARSE: END TYPE -!UNPARSE: !$OMP METADIRECTIVE WHEN(USER={CONDITION(.true._4)}: DECLARE REDUCTION(+:tt1,tt2: omp_out%x = omp_in%x+omp_out%x)& -!UNPARSE: !$OMP&) +!UNPARSE: !$OMP METADIRECTIVE WHEN(USER={CONDITION(.true._4)}: DECLARE REDUCTION(+:tt1, tt2: omp& +!UNPARSE: !$OMP&_out%x = omp_in%x + omp_out%x)) !UNPARSE: END SUBROUTINE !PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OmpMetadirectiveDirective @@ -127,21 +127,44 @@ end !PARSE-TREE: | | | | | Name = 'tt1' !PARSE-TREE: | | | | OmpTypeName -> TypeSpec -> DerivedTypeSpec !PARSE-TREE: | | | | | Name = 'tt2' -!PARSE-TREE: | | | | OmpCombinerExpression -> AssignmentStmt = 'omp_out%x=omp_in%x+omp_out%x' -!PARSE-TREE: | | | | | | Designator -> DataRef -> StructureComponent -!PARSE-TREE: | | | | | | | DataRef -> Name = 'omp_out' -!PARSE-TREE: | | | | | | | Name = 'x' -!PARSE-TREE: | | | | | Expr = 'omp_in%x+omp_out%x' -!PARSE-TREE: | | | | | | Add -!PARSE-TREE: | | | | | | | Expr = 'omp_in%x' -!PARSE-TREE: | | | | | | | | Designator -> DataRef -> StructureComponent -!PARSE-TREE: | | | | | | | | | DataRef -> Name = 'omp_in' -!PARSE-TREE: | | | | | | | | | Name = 'x' -!PARSE-TREE: | | | | | | | Expr = 'omp_out%x' -!PARSE-TREE: | | | | | | | | Designator -> DataRef -> StructureComponent -!PARSE-TREE: | | | | | | | | | DataRef -> Name = 'omp_out' -!PARSE-TREE: | | | | | | | | | Name = 'x' +!PARSE-TREE: | | | | OmpCombinerExpression -> OmpStylizedInstance +!PARSE-TREE: | | | | | OmpStylizedDeclaration +!PARSE-TREE: | | | | | OmpStylizedDeclaration +!PARSE-TREE: | | | | | Instance -> AssignmentStmt = 'omp_out%x=omp_in%x+omp_out%x' +!PARSE-TREE: | | | | | | Variable = 'omp_out%x' +!PARSE-TREE: | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | | | Name = 'x' +!PARSE-TREE: | | | | | | Expr = 'omp_in%x+omp_out%x' +!PARSE-TREE: | | | | | | | Add +!PARSE-TREE: | | | | | | | | Expr = 'omp_in%x' +!PARSE-TREE: | | | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | | | DataRef -> Name = 'omp_in' +!PARSE-TREE: | | | | | | | | | | Name = 'x' +!PARSE-TREE: | | | | | | | | Expr = 'omp_out%x' +!PARSE-TREE: | | | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | | | | | Name = 'x' +!PARSE-TREE: | | | | OmpStylizedInstance +!PARSE-TREE: | | | | | OmpStylizedDeclaration +!PARSE-TREE: | | | | | OmpStylizedDeclaration +!PARSE-TREE: | | | | | Instance -> AssignmentStmt = 'omp_out%x=omp_in%x+omp_out%x' +!PARSE-TREE: | | | | | | Variable = 'omp_out%x' +!PARSE-TREE: | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | | | Name = 'x' +!PARSE-TREE: | | | | | | Expr = 'omp_in%x+omp_out%x' +!PARSE-TREE: | | | | | | | Add +!PARSE-TREE: | | | | | | | | Expr = 'omp_in%x' 
+!PARSE-TREE: | | | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | | | DataRef -> Name = 'omp_in' +!PARSE-TREE: | | | | | | | | | | Name = 'x' +!PARSE-TREE: | | | | | | | | Expr = 'omp_out%x' +!PARSE-TREE: | | | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | | | | | Name = 'x' !PARSE-TREE: | | | OmpClauseList -> +!PARSE-TREE: | | | Flags = None subroutine f04 !$omp metadirective when(user={condition(.true.)}: & diff --git a/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 b/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 index 39e8f05..50a38c6 100644 --- a/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 +++ b/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 @@ -79,7 +79,7 @@ end !UNPARSE: TYPE :: t !UNPARSE: INTEGER :: x !UNPARSE: END TYPE -!UNPARSE: !$OMP DECLARE_REDUCTION(+:t: omp_out%x = omp_out%x+omp_in%x) +!UNPARSE: !$OMP DECLARE_REDUCTION(+:t: omp_out%x = omp_out%x + omp_in%x) !UNPARSE: END SUBROUTINE !PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification @@ -88,21 +88,24 @@ end !PARSE-TREE: | | OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add !PARSE-TREE: | | OmpTypeNameList -> OmpTypeName -> TypeSpec -> DerivedTypeSpec !PARSE-TREE: | | | Name = 't' -!PARSE-TREE: | | OmpCombinerExpression -> AssignmentStmt = 'omp_out%x=omp_out%x+omp_in%x' -!PARSE-TREE: | | | Variable = 'omp_out%x' -!PARSE-TREE: | | | | Designator -> DataRef -> StructureComponent -!PARSE-TREE: | | | | | DataRef -> Name = 'omp_out' -!PARSE-TREE: | | | | | Name = 'x' -!PARSE-TREE: | | | Expr = 'omp_out%x+omp_in%x' -!PARSE-TREE: | | | | Add -!PARSE-TREE: | | | | | Expr = 'omp_out%x' -!PARSE-TREE: | | | | | | Designator -> DataRef -> StructureComponent -!PARSE-TREE: | | | | | | | DataRef -> Name = 'omp_out' -!PARSE-TREE: | | | | | | | Name = 'x' -!PARSE-TREE: | | | | | Expr = 'omp_in%x' -!PARSE-TREE: | | | | | | Designator -> DataRef -> StructureComponent -!PARSE-TREE: | | | | | | | DataRef -> Name = 'omp_in' -!PARSE-TREE: | | | | | | | Name = 'x' +!PARSE-TREE: | | OmpCombinerExpression -> OmpStylizedInstance +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | OmpStylizedDeclaration +!PARSE-TREE: | | | Instance -> AssignmentStmt = 'omp_out%x=omp_out%x+omp_in%x' +!PARSE-TREE: | | | | Variable = 'omp_out%x' +!PARSE-TREE: | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | Name = 'x' +!PARSE-TREE: | | | | Expr = 'omp_out%x+omp_in%x' +!PARSE-TREE: | | | | | Add +!PARSE-TREE: | | | | | | Expr = 'omp_out%x' +!PARSE-TREE: | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | DataRef -> Name = 'omp_out' +!PARSE-TREE: | | | | | | | | Name = 'x' +!PARSE-TREE: | | | | | | Expr = 'omp_in%x' +!PARSE-TREE: | | | | | | | Designator -> DataRef -> StructureComponent +!PARSE-TREE: | | | | | | | | DataRef -> Name = 'omp_in' +!PARSE-TREE: | | | | | | | | Name = 'x' !PARSE-TREE: | OmpClauseList -> !PARSE-TREE: | Flags = None diff --git a/flang/test/Semantics/OpenMP/declare-reduction-error.f90 b/flang/test/Semantics/OpenMP/declare-reduction-error.f90 deleted file mode 100644 index 21f5cc1..0000000 --- a/flang/test/Semantics/OpenMP/declare-reduction-error.f90 +++ /dev/null @@ -1,11 +0,0 @@ -! 
RUN: not %flang_fc1 -emit-obj -fopenmp -fopenmp-version=50 %s 2>&1 | FileCheck %s - -subroutine initme(x,n) - integer x,n - x=n -end subroutine initme - -subroutine subr - !$omp declare reduction(red_add:integer(4):omp_out=omp_out+omp_in) initializer(initme(omp_priv,0)) - !CHECK: error: Implicit subroutine declaration 'initme' in DECLARE REDUCTION -end subroutine subr diff --git a/flang/test/Semantics/OpenMP/declare-reduction-functions.f90 b/flang/test/Semantics/OpenMP/declare-reduction-functions.f90 index 000d323..89e0771 100644 --- a/flang/test/Semantics/OpenMP/declare-reduction-functions.f90 +++ b/flang/test/Semantics/OpenMP/declare-reduction-functions.f90 @@ -57,9 +57,10 @@ contains !CHECK: adder: UserReductionDetails TYPE(two) !CHECK OtherConstruct scope !CHECK: omp_in size=8 offset=0: ObjectEntity type: TYPE(two) -!CHECK: omp_orig size=8 offset=8: ObjectEntity type: TYPE(two) -!CHECK: omp_out size=8 offset=16: ObjectEntity type: TYPE(two) -!CHECK: omp_priv size=8 offset=24: ObjectEntity type: TYPE(two) +!CHECK: omp_out size=8 offset=8: ObjectEntity type: TYPE(two) +!CHECK OtherConstruct scope +!CHECK: omp_orig size=8 offset=0: ObjectEntity type: TYPE(two) +!CHECK: omp_priv size=8 offset=8: ObjectEntity type: TYPE(two) !$omp simd reduction(adder:res) @@ -101,14 +102,16 @@ contains !CHECK: adder: UserReductionDetails TYPE(two) TYPE(three) !CHECK OtherConstruct scope !CHECK: omp_in size=8 offset=0: ObjectEntity type: TYPE(two) -!CHECK: omp_orig size=8 offset=8: ObjectEntity type: TYPE(two) -!CHECK: omp_out size=8 offset=16: ObjectEntity type: TYPE(two) -!CHECK: omp_priv size=8 offset=24: ObjectEntity type: TYPE(two) +!CHECK: omp_out size=8 offset=8: ObjectEntity type: TYPE(two) +!CHECK OtherConstruct scope +!CHECK: omp_orig size=8 offset=0: ObjectEntity type: TYPE(two) +!CHECK: omp_priv size=8 offset=8: ObjectEntity type: TYPE(two) !CHECK OtherConstruct scope !CHECK: omp_in size=24 offset=0: ObjectEntity type: TYPE(three) -!CHECK: omp_orig size=24 offset=24: ObjectEntity type: TYPE(three) -!CHECK: omp_out size=24 offset=48: ObjectEntity type: TYPE(three) -!CHECK: omp_priv size=24 offset=72: ObjectEntity type: TYPE(three) +!CHECK: omp_out size=24 offset=24: ObjectEntity type: TYPE(three) +!CHECK OtherConstruct scope +!CHECK: omp_orig size=24 offset=0: ObjectEntity type: TYPE(three) +!CHECK: omp_priv size=24 offset=24: ObjectEntity type: TYPE(three) !$omp simd reduction(adder:res3) do i=1,n @@ -135,9 +138,10 @@ contains !CHECK: op.+: UserReductionDetails TYPE(two) !CHECK OtherConstruct scope !CHECK: omp_in size=8 offset=0: ObjectEntity type: TYPE(two) -!CHECK: omp_orig size=8 offset=8: ObjectEntity type: TYPE(two) -!CHECK: omp_out size=8 offset=16: ObjectEntity type: TYPE(two) -!CHECK: omp_priv size=8 offset=24: ObjectEntity type: TYPE(two) +!CHECK: omp_out size=8 offset=8: ObjectEntity type: TYPE(two) +!CHECK OtherConstruct scope +!CHECK: omp_orig size=8 offset=0: ObjectEntity type: TYPE(two) +!CHECK: omp_priv size=8 offset=8: ObjectEntity type: TYPE(two) !$omp simd reduction(+:res) @@ -163,14 +167,16 @@ contains !CHECK: op.+: UserReductionDetails TYPE(two) TYPE(three) !CHECK OtherConstruct scope !CHECK: omp_in size=8 offset=0: ObjectEntity type: TYPE(two) -!CHECK: omp_orig size=8 offset=8: ObjectEntity type: TYPE(two) -!CHECK: omp_out size=8 offset=16: ObjectEntity type: TYPE(two) -!CHECK: omp_priv size=8 offset=24: ObjectEntity type: TYPE(two) +!CHECK: omp_out size=8 offset=8: ObjectEntity type: TYPE(two) +!CHECK OtherConstruct scope +!CHECK: omp_orig size=8 offset=0: ObjectEntity 
type: TYPE(two) +!CHECK: omp_priv size=8 offset=8: ObjectEntity type: TYPE(two) !CHECK: OtherConstruct scope !CHECK: omp_in size=24 offset=0: ObjectEntity type: TYPE(three) -!CHECK: omp_orig size=24 offset=24: ObjectEntity type: TYPE(three) -!CHECK: omp_out size=24 offset=48: ObjectEntity type: TYPE(three) -!CHECK: omp_priv size=24 offset=72: ObjectEntity type: TYPE(three) +!CHECK: omp_out size=24 offset=24: ObjectEntity type: TYPE(three) +!CHECK OtherConstruct scope +!CHECK: omp_orig size=24 offset=0: ObjectEntity type: TYPE(three) +!CHECK: omp_priv size=24 offset=24: ObjectEntity type: TYPE(three) !$omp simd reduction(+:res3) do i=1,n @@ -183,6 +189,7 @@ contains enddo res%t2 = res2 res%t3 = res3 + funcBtwothree = res end function funcBtwothree !! This is checking a special case, where a reduction is declared inside a @@ -191,11 +198,12 @@ contains pure logical function reduction() !CHECK: reduction size=4 offset=0: ObjectEntity funcResult type: LOGICAL(4) !CHECK: rr: UserReductionDetails INTEGER(4) -!CHECK: OtherConstruct scope: size=16 alignment=4 sourceRange=0 bytes +!CHECK: OtherConstruct scope: size=8 alignment=4 sourceRange=0 bytes !CHECK: omp_in size=4 offset=0: ObjectEntity type: INTEGER(4) -!CHECK: omp_orig size=4 offset=4: ObjectEntity type: INTEGER(4) -!CHECK: omp_out size=4 offset=8: ObjectEntity type: INTEGER(4) -!CHECK: omp_priv size=4 offset=12: ObjectEntity type: INTEGER(4) +!CHECK: omp_out size=4 offset=4: ObjectEntity type: INTEGER(4) +!CHECK: OtherConstruct scope: size=8 alignment=4 sourceRange=0 bytes +!CHECK: omp_orig size=4 offset=0: ObjectEntity type: INTEGER(4) +!CHECK: omp_priv size=4 offset=4: ObjectEntity type: INTEGER(4) !$omp declare reduction (rr : integer : omp_out = omp_out + omp_in) initializer (omp_priv = 0) reduction = .false. 
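! A minimal usage sketch (illustrative only; total, a, n, and i are
! hypothetical and not part of this test): once declared, rr can be named
! in any reduction clause in scope, e.g.
!   !$omp simd reduction(rr : total)
!   do i = 1, n
!     total = total + a(i)
!   end do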
end function reduction diff --git a/flang/test/Semantics/OpenMP/declare-reduction-logical.f90 b/flang/test/Semantics/OpenMP/declare-reduction-logical.f90 index 7ab7cad..87fcecd 100644 --- a/flang/test/Semantics/OpenMP/declare-reduction-logical.f90 +++ b/flang/test/Semantics/OpenMP/declare-reduction-logical.f90 @@ -18,9 +18,10 @@ contains !CHECK: op.AND: UserReductionDetails TYPE(logicalwrapper) !CHECK OtherConstruct scope !CHECK: omp_in size=4 offset=0: ObjectEntity type: TYPE(logicalwrapper) -!CHECK: omp_orig size=4 offset=4: ObjectEntity type: TYPE(logicalwrapper) -!CHECK: omp_out size=4 offset=8: ObjectEntity type: TYPE(logicalwrapper) -!CHECK: omp_priv size=4 offset=12: ObjectEntity type: TYPE(logicalwrapper) +!CHECK: omp_out size=4 offset=4: ObjectEntity type: TYPE(logicalwrapper) +!CHECK OtherConstruct scope +!CHECK: omp_orig size=4 offset=0: ObjectEntity type: TYPE(logicalwrapper) +!CHECK: omp_priv size=4 offset=4: ObjectEntity type: TYPE(logicalwrapper) !$omp simd reduction(.AND.:res) do i=1,n diff --git a/flang/test/Semantics/OpenMP/declare-reduction-modfile.f90 b/flang/test/Semantics/OpenMP/declare-reduction-modfile.f90 index 0882de8..763179c 100644 --- a/flang/test/Semantics/OpenMP/declare-reduction-modfile.f90 +++ b/flang/test/Semantics/OpenMP/declare-reduction-modfile.f90 @@ -6,13 +6,13 @@ !type::t1 !integer(4)::val !endtype -!!$OMP DECLARE REDUCTION(*:t1:omp_out=omp_out*omp_in)INITIALIZER(omp_priv=& -!!$OMP&t1(1)) +!!$OMP DECLARE REDUCTION(*:t1: omp_out=omp_out*omp_in) INITIALIZER(omp_priv=t1(& +!!$OMP&1)) !!$OMP METADIRECTIVE OTHERWISE(DECLARE REDUCTION(+:INTEGER)) -!!$OMP DECLARE REDUCTION(.fluffy.:t1:omp_out=omp_out.fluffy.omp_in)INITIALI& -!!$OMP&ZER(omp_priv=t1(0)) -!!$OMP DECLARE REDUCTION(.mul.:t1:omp_out=omp_out.mul.omp_in)INITIALIZER(om& -!!$OMP&p_priv=t1(1)) +!!$OMP DECLARE REDUCTION(.fluffy.:t1: omp_out=omp_out.fluffy.omp_in) INITIALIZE& +!!$OMP&R(omp_priv=t1(0)) +!!$OMP DECLARE REDUCTION(.mul.:t1: omp_out=omp_out.mul.omp_in) INITIALIZER(omp_& +!!$OMP&priv=t1(1)) !interface operator(.mul.) 
!procedure::mul !end interface diff --git a/flang/test/Semantics/OpenMP/declare-reduction-operator.f90 b/flang/test/Semantics/OpenMP/declare-reduction-operator.f90 index dc12332..5fc4205 100644 --- a/flang/test/Semantics/OpenMP/declare-reduction-operator.f90 +++ b/flang/test/Semantics/OpenMP/declare-reduction-operator.f90 @@ -11,11 +11,9 @@ module m1 !$omp declare reduction(.fluffy.:t1:omp_out=omp_out.fluffy.omp_in) !CHECK: op.fluffy., PUBLIC: UserReductionDetails TYPE(t1) !CHECK: t1, PUBLIC: DerivedType components: val -!CHECK: OtherConstruct scope: size=16 alignment=4 sourceRange=0 bytes +!CHECK: OtherConstruct scope: size=8 alignment=4 sourceRange=0 bytes !CHECK: omp_in size=4 offset=0: ObjectEntity type: TYPE(t1) -!CHECK: omp_orig size=4 offset=4: ObjectEntity type: TYPE(t1) -!CHECK: omp_out size=4 offset=8: ObjectEntity type: TYPE(t1) -!CHECK: omp_priv size=4 offset=12: ObjectEntity type: TYPE(t1) +!CHECK: omp_out size=4 offset=4: ObjectEntity type: TYPE(t1) contains function my_mul(x, y) type (t1), intent (in) :: x, y diff --git a/flang/test/Semantics/OpenMP/declare-reduction-operators.f90 b/flang/test/Semantics/OpenMP/declare-reduction-operators.f90 index 84dbe1a..e0006bf 100644 --- a/flang/test/Semantics/OpenMP/declare-reduction-operators.f90 +++ b/flang/test/Semantics/OpenMP/declare-reduction-operators.f90 @@ -64,9 +64,10 @@ program test_vector !CHECK: OtherConstruct scope: !CHECK: omp_in size=12 offset=0: ObjectEntity type: TYPE(vector) -!CHECK: omp_orig size=12 offset=12: ObjectEntity type: TYPE(vector) -!CHECK: omp_out size=12 offset=24: ObjectEntity type: TYPE(vector) -!CHECK: omp_priv size=12 offset=36: ObjectEntity type: TYPE(vector) +!CHECK: omp_out size=12 offset=12: ObjectEntity type: TYPE(vector) +!CHECK: OtherConstruct scope: +!CHECK: omp_orig size=12 offset=0: ObjectEntity type: TYPE(vector) +!CHECK: omp_priv size=12 offset=12: ObjectEntity type: TYPE(vector) v2 = Vector(0.0, 0.0, 0.0) v1 = Vector(1.0, 2.0, 3.0) diff --git a/flang/test/Semantics/OpenMP/declare-reduction-renamedop.f90 b/flang/test/Semantics/OpenMP/declare-reduction-renamedop.f90 index 9cd638d..115fe51 100644 --- a/flang/test/Semantics/OpenMP/declare-reduction-renamedop.f90 +++ b/flang/test/Semantics/OpenMP/declare-reduction-renamedop.f90 @@ -33,11 +33,12 @@ program test_omp_reduction !$omp declare reduction (.modmul. : t1 : omp_out = omp_out .modmul. 
omp_in) initializer(omp_priv = t1(1.0)) !CHECK: op.modmul.: UserReductionDetails TYPE(t1) !CHECK: t1: Use from t1 in module1 -!CHECK: OtherConstruct scope: size=16 alignment=4 sourceRange=0 bytes +!CHECK: OtherConstruct scope: size=8 alignment=4 sourceRange=0 bytes !CHECK: omp_in size=4 offset=0: ObjectEntity type: TYPE(t1) -!CHECK: omp_orig size=4 offset=4: ObjectEntity type: TYPE(t1) -!CHECK: omp_out size=4 offset=8: ObjectEntity type: TYPE(t1) -!CHECK: omp_priv size=4 offset=12: ObjectEntity type: TYPE(t1) +!CHECK: omp_out size=4 offset=4: ObjectEntity type: TYPE(t1) +!CHECK: OtherConstruct scope: size=8 alignment=4 sourceRange=0 bytes +!CHECK: omp_orig size=4 offset=0: ObjectEntity type: TYPE(t1) +!CHECK: omp_priv size=4 offset=4: ObjectEntity type: TYPE(t1) result = t1(1.0) !$omp parallel do reduction(.modmul.:result) do i = 1, 10 diff --git a/flang/test/Semantics/OpenMP/declare-reduction.f90 b/flang/test/Semantics/OpenMP/declare-reduction.f90 index 1f39c57..c8dee5e 100644 --- a/flang/test/Semantics/OpenMP/declare-reduction.f90 +++ b/flang/test/Semantics/OpenMP/declare-reduction.f90 @@ -19,10 +19,12 @@ function func(x, n, init) !$omp declare reduction(red_add:integer(4):omp_out=omp_out+omp_in) initializer(initme(omp_priv,0)) !CHECK: red_add: UserReductionDetails !CHECK: Subprogram scope: initme +!CHECK: OtherConstruct scope: !CHECK: omp_in size=4 offset=0: ObjectEntity type: INTEGER(4) -!CHECK: omp_orig size=4 offset=4: ObjectEntity type: INTEGER(4) -!CHECK: omp_out size=4 offset=8: ObjectEntity type: INTEGER(4) -!CHECK: omp_priv size=4 offset=12: ObjectEntity type: INTEGER(4) +!CHECK: omp_out size=4 offset=4: ObjectEntity type: INTEGER(4) +!CHECK: OtherConstruct scope: +!CHECK: omp_orig size=4 offset=0: ObjectEntity type: INTEGER(4) +!CHECK: omp_priv size=4 offset=4: ObjectEntity type: INTEGER(4) !$omp simd reduction(red_add:res) do i=1,n res=res+x(i) @@ -36,9 +38,11 @@ program main !$omp declare reduction (my_add_red : integer : omp_out = omp_out + omp_in) initializer (omp_priv=0) !CHECK: my_add_red: UserReductionDetails +!CHECK: OtherConstruct scope: !CHECK: omp_in size=4 offset=0: ObjectEntity type: INTEGER(4) -!CHECK: omp_orig size=4 offset=4: ObjectEntity type: INTEGER(4) -!CHECK: omp_out size=4 offset=8: ObjectEntity type: INTEGER(4) -!CHECK: omp_priv size=4 offset=12: ObjectEntity type: INTEGER(4) +!CHECK: omp_out size=4 offset=4: ObjectEntity type: INTEGER(4) +!CHECK: OtherConstruct scope: +!CHECK: omp_orig size=4 offset=0: ObjectEntity type: INTEGER(4) +!CHECK: omp_priv size=4 offset=4: ObjectEntity type: INTEGER(4) end program main diff --git a/libc/utils/hdrgen/hdrgen/header.py b/libc/utils/hdrgen/hdrgen/header.py index 2118db6..715d4b7 100644 --- a/libc/utils/hdrgen/hdrgen/header.py +++ b/libc/utils/hdrgen/hdrgen/header.py @@ -241,7 +241,5 @@ class HeaderFile: return { "name": self.name, "standards": self.standards, - "includes": [ - str(file) for file in sorted({COMMON_HEADER} | self.includes()) - ], + "includes": sorted(str(file) for file in {COMMON_HEADER} | self.includes()), } diff --git a/libunwind/src/DwarfParser.hpp b/libunwind/src/DwarfParser.hpp index dbd7d65..2b04ae2 100644 --- a/libunwind/src/DwarfParser.hpp +++ b/libunwind/src/DwarfParser.hpp @@ -842,12 +842,10 @@ bool CFI_Parser<A>::parseFDEInstructions(A &addressSpace, results->savedRegisters[UNW_AARCH64_RA_SIGN_STATE].value ^ 0x3; results->setRegisterValue(UNW_AARCH64_RA_SIGN_STATE, value, initialState); - // When calculating the value of the PC, it is assumed that the CFI - // instruction is placed before 
the signing instruction, however it is - placed after. Because of this, we need to take into account the CFI - instruction is one instruction call later than expected, and reduce - the PC value by 4 bytes to compensate. - results->ptrAuthDiversifier = fdeInfo.pcStart + codeOffset - 0x4; + // When using Feat_PAuthLR, the PC value needs to be captured so that + // during unwinding, the correct PC value is used for re-authentication. + // It is assumed that the CFI is placed before the signing instruction. + results->ptrAuthDiversifier = fdeInfo.pcStart + codeOffset; _LIBUNWIND_TRACE_DWARF( "DW_CFA_AARCH64_negate_ra_state_with_pc(pc=0x%" PRIx64 ")\n", static_cast<uint64_t>(results->ptrAuthDiversifier)); diff --git a/lld/test/wasm/lto/relocation-model.ll b/lld/test/wasm/lto/relocation-model.ll index 8fe198d..a042615 100644 --- a/lld/test/wasm/lto/relocation-model.ll +++ b/lld/test/wasm/lto/relocation-model.ll @@ -8,6 +8,11 @@ ; RUN: wasm-ld %t.o -o %t_static.wasm -save-temps -r -mllvm -relocation-model=static ; RUN: llvm-readobj -r %t_static.wasm.lto.o | FileCheck %s --check-prefix=STATIC +;; Linking with --unresolved-symbols=import-dynamic should also generate PIC +;; code for external references. +; RUN: wasm-ld %t.o -o %t_import.wasm -save-temps --experimental-pic --unresolved-symbols=import-dynamic +; RUN: llvm-readobj -r %t_import.wasm.lto.o | FileCheck %s --check-prefix=PIC + ; PIC: R_WASM_GLOBAL_INDEX_LEB foo ; STATIC: R_WASM_MEMORY_ADDR_LEB foo diff --git a/lld/wasm/LTO.cpp b/lld/wasm/LTO.cpp index ae85f46..668cdf2 100644 --- a/lld/wasm/LTO.cpp +++ b/lld/wasm/LTO.cpp @@ -63,6 +63,12 @@ static lto::Config createConfig() { c.RelocModel = std::nullopt; else if (ctx.isPic) c.RelocModel = Reloc::PIC_; + else if (ctx.arg.unresolvedSymbols == UnresolvedPolicy::ImportDynamic) + // With ImportDynamic we also need to use the PIC relocation model so that + // external symbols are referenced via the GOT. + // TODO(sbc): This should probably be Reloc::DynamicNoPIC, but the backend + // doesn't currently support that. + c.RelocModel = Reloc::PIC_; else c.RelocModel = Reloc::Static; diff --git a/lldb/docs/resources/build.rst b/lldb/docs/resources/build.rst index 0db8c92..2eb1677 100644 --- a/lldb/docs/resources/build.rst +++ b/lldb/docs/resources/build.rst @@ -95,37 +95,31 @@ commands below. Windows ******* -* Visual Studio 2019. -* The latest Windows SDK. -* The Active Template Library (ATL). -* `GnuWin32 <http://gnuwin32.sourceforge.net/>`_ for CoreUtils and Make. -* `Python 3 <https://www.python.org/downloads/windows/>`_. Make sure to (1) get - the x64 variant if that's what you're targeting and (2) install the debug - library if you want to build a debug lldb. The standalone installer is the - easiest way to get the debug library. -* `Python Tools for Visual Studio - <https://github.com/Microsoft/PTVS/>`_. If you plan to debug test failures - or even write new tests at all, PTVS is an indispensable debugging - extension to VS that enables full editing and debugging support for Python - (including mixed native/managed debugging). -* `SWIG for Windows <http://www.swig.org/download.html>`_ - -The steps outlined here describes how to set up your system and install the -required dependencies such that they can be found when needed during the build -process. They only need to be performed once. - -#. Install Visual Studio with the "Desktop Development with C++" workload and - the "Python Development" workload. -#. 
Install GnuWin32, making sure ``<GnuWin32 install dir>\bin`` is added to - your PATH environment variable. Verify that utilities like ``dirname`` and - ``make`` are available from your terminal. -#. Install SWIG for Windows, making sure ``<SWIG install dir>`` is added to - your PATH environment variable. Verify that ``swig`` is available from your - terminal. -#. Install Python 3 from the standalone installer and include the debug libraries - in the install, making sure the Python install path is added to your PATH - environment variable. -#. Register the Debug Interface Access DLLs with the Registry from a privileged +The steps outlined here describe how to set up your system and install the +required dependencies for building and testing LLDB on Windows. They only need +to be performed once. + +Build Requirements +^^^^^^^^^^^^^^^^^^ + +Please follow the steps below if you only want to **build** lldb. + +1. Install `Visual Studio <https://visualstudio.microsoft.com>`_ with the + "Desktop Development with C++" workload. Make sure that the latest Windows + SDK and the Active Template Library (ATL) are installed. +2. Install `Git Bash <https://git-scm.com/install/windows>`_ and add + ``<Git install dir>\usr\bin`` to your ``PATH``. Verify that utilities like + ``dirname`` are available from your terminal. +3. Install `make <https://sourceforge.net/projects/ezwinports/files/>`_ and + verify that it's in your ``PATH``. +4. Install `Python 3 <https://www.python.org/downloads/windows/>`_ from the + GUI installer. If you will be building LLDB in Debug mode, **include the + debug libraries** during the install. Make sure ``python`` is added to your + ``PATH``. +5. Install `SWIG for Windows <http://www.swig.org/download.html>`_. Make sure + ``swig`` is added to your ``PATH`` and that ``swig -swiglib`` points to the + correct directory. +6. Register the Debug Interface Access DLLs with the Registry from a privileged terminal. :: @@ -139,6 +133,16 @@ Prompt for VS <https://docs.microsoft.com/en-us/visualstudio/ide/reference/comma corresponding to the version you wish to use or run ``vcvarsall.bat`` or ``VsDevCmd.bat``. +Test Requirements +^^^^^^^^^^^^^^^^^ + +Please follow the steps above and below if you want to **test** lldb. + +* Install `Python Tools for Visual Studio <https://github.com/Microsoft/PTVS/>`_, + an indispensable debugging extension to Visual Studio which enables full + editing and debugging support for Python (including mixed native/managed + debugging). + macOS ***** diff --git a/llvm/include/llvm/Analysis/IR2Vec.h b/llvm/include/llvm/Analysis/IR2Vec.h index 5ad6288..71055dd16 100644 --- a/llvm/include/llvm/Analysis/IR2Vec.h +++ b/llvm/include/llvm/Analysis/IR2Vec.h @@ -161,7 +161,7 @@ private: public: /// Default constructor creates empty storage (invalid state) - VocabStorage() : Sections(), TotalSize(0), Dimension(0) {} + VocabStorage() = default; /// Create a VocabStorage with pre-organized section data VocabStorage(std::vector<std::vector<Embedding>> &&SectionData); diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h index b0601eb..36cb90b 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h @@ -640,7 +640,8 @@ public: /// This variant does not erase \p MI after calling the build function. 
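/// A build function is a callback that emits the replacement instructions
/// once a match succeeds; a minimal sketch (register names are illustrative,
/// mirroring the funnel-shift combine below):
///   MatchInfo = [=](MachineIRBuilder &B) {
///     B.buildInstr(TargetOpcode::G_FSHR, {Dst}, {ShlSrc, LShrSrc, Amt});
///   };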
void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo) const; - bool matchOrShiftToFunnelShift(MachineInstr &MI, BuildFnTy &MatchInfo) const; + bool matchOrShiftToFunnelShift(MachineInstr &MI, bool AllowScalarConstants, + BuildFnTy &MatchInfo) const; bool matchFunnelShiftToRotate(MachineInstr &MI) const; void applyFunnelShiftToRotate(MachineInstr &MI) const; bool matchRotateOutOfRange(MachineInstr &MI) const; diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index 4058dd7..1920b98 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -3761,7 +3761,7 @@ private: /// register class is the largest legal super-reg register class of the /// register class of the specified type. e.g. On x86, i8, i16, and i32's /// representative class would be GR32. - const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE] = {0}; + const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE] = {nullptr}; /// This indicates the "cost" of the "representative" register class for each /// ValueType. The cost is used by the scheduler to approximate register diff --git a/llvm/include/llvm/IR/IntrinsicsDirectX.td b/llvm/include/llvm/IR/IntrinsicsDirectX.td index 3b7077c..d6b8563 100644 --- a/llvm/include/llvm/IR/IntrinsicsDirectX.td +++ b/llvm/include/llvm/IR/IntrinsicsDirectX.td @@ -153,6 +153,8 @@ def int_dx_wave_any : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_i1_ty], [IntrCon def int_dx_wave_getlaneindex : DefaultAttrsIntrinsic<[llvm_i32_ty], [], [IntrConvergent, IntrNoMem]>; def int_dx_wave_reduce_max : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>; def int_dx_wave_reduce_umax : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>; +def int_dx_wave_reduce_min : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>; +def int_dx_wave_reduce_umin : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>; def int_dx_wave_reduce_sum : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>; def int_dx_wave_reduce_usum : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>; def int_dx_wave_is_first_lane : DefaultAttrsIntrinsic<[llvm_i1_ty], [], [IntrConvergent]>; diff --git a/llvm/include/llvm/IR/IntrinsicsSPIRV.td b/llvm/include/llvm/IR/IntrinsicsSPIRV.td index 49a182be..bc51fb6 100644 --- a/llvm/include/llvm/IR/IntrinsicsSPIRV.td +++ b/llvm/include/llvm/IR/IntrinsicsSPIRV.td @@ -122,6 +122,8 @@ def int_spv_rsqrt : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty] def int_spv_wave_any : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_i1_ty], [IntrConvergent, IntrNoMem]>; def int_spv_wave_reduce_umax : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>; def int_spv_wave_reduce_max : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>; + def int_spv_wave_reduce_min : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>; + def int_spv_wave_reduce_umin : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>; def int_spv_wave_reduce_sum : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>; def int_spv_wave_is_first_lane : DefaultAttrsIntrinsic<[llvm_i1_ty], [], [IntrConvergent]>; def int_spv_wave_readlane : 
DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>, llvm_i32_ty], [IntrConvergent, IntrNoMem]>; @@ -136,7 +138,7 @@ def int_spv_rsqrt : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty] def int_spv_sclamp : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>; def int_spv_nclamp : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>; - // Create resource handle given the binding information. Returns a + // Create resource handle given the binding information. Returns a // type appropriate for the kind of resource given the set id, binding id, // array size of the binding, as well as an index and an indicator // whether that index may be non-uniform. diff --git a/llvm/include/llvm/IR/Mangler.h b/llvm/include/llvm/IR/Mangler.h index 232101a..4d387ba 100644 --- a/llvm/include/llvm/IR/Mangler.h +++ b/llvm/include/llvm/IR/Mangler.h @@ -80,8 +80,7 @@ getArm64ECDemangledFunctionName(StringRef Name); /// Check if an ARM64EC function name is mangled. bool inline isArm64ECMangledFunctionName(StringRef Name) { - return Name[0] == '#' || - (Name[0] == '?' && Name.find("@$$h") != StringRef::npos); + return Name[0] == '#' || (Name[0] == '?' && Name.contains("@$$h")); } } // End llvm namespace diff --git a/llvm/include/llvm/Support/FormattedStream.h b/llvm/include/llvm/Support/FormattedStream.h index 011a6ae..402cd3e 100644 --- a/llvm/include/llvm/Support/FormattedStream.h +++ b/llvm/include/llvm/Support/FormattedStream.h @@ -180,7 +180,8 @@ public: return *this; } - raw_ostream &changeColor(enum Colors Color, bool Bold, bool BG) override { + raw_ostream &changeColor(enum Colors Color, bool Bold = false, + bool BG = false) override { if (colors_enabled()) { DisableScanScope S(this); raw_ostream::changeColor(Color, Bold, BG); diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td index 47d5d68..119695e 100644 --- a/llvm/include/llvm/Target/GlobalISel/Combine.td +++ b/llvm/include/llvm/Target/GlobalISel/Combine.td @@ -1013,10 +1013,18 @@ def extract_vec_elt_combines : GICombineGroup<[ def funnel_shift_from_or_shift : GICombineRule< (defs root:$root, build_fn_matchinfo:$info), (match (wip_match_opcode G_OR):$root, - [{ return Helper.matchOrShiftToFunnelShift(*${root}, ${info}); }]), + [{ return Helper.matchOrShiftToFunnelShift(*${root}, false, ${info}); }]), (apply [{ Helper.applyBuildFn(*${root}, ${info}); }]) >; +def funnel_shift_from_or_shift_constants_are_legal : GICombineRule< + (defs root:$root, build_fn_matchinfo:$info), + (match (wip_match_opcode G_OR):$root, + [{ return Helper.matchOrShiftToFunnelShift(*${root}, true, ${info}); }]), + (apply [{ Helper.applyBuildFn(*${root}, ${info}); }]) +>; + + def funnel_shift_to_rotate : GICombineRule< (defs root:$root), (match (wip_match_opcode G_FSHL, G_FSHR):$root, diff --git a/llvm/lib/Analysis/HeatUtils.cpp b/llvm/lib/Analysis/HeatUtils.cpp index a1cc707..08e9428 100644 --- a/llvm/lib/Analysis/HeatUtils.cpp +++ b/llvm/lib/Analysis/HeatUtils.cpp @@ -64,10 +64,7 @@ std::string llvm::getHeatColor(uint64_t Freq, uint64_t MaxFreq) { } std::string llvm::getHeatColor(double Percent) { - if (Percent > 1.0) - Percent = 1.0; - if (Percent < 0.0) - Percent = 0.0; + Percent = std::clamp(Percent, 0.0, 1.0); unsigned ColorID = unsigned(round(Percent * (HeatSize - 1.0))); return HeatPalette[ColorID]; } diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp 
b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp index 1f10478..9ace7d6 100644 --- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp @@ -4425,6 +4425,7 @@ void CombinerHelper::applyBuildFnNoErase( } bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI, + bool AllowScalarConstants, BuildFnTy &MatchInfo) const { assert(MI.getOpcode() == TargetOpcode::G_OR); @@ -4444,31 +4445,29 @@ bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI, // Given constants C0 and C1 such that C0 + C1 is bit-width: // (or (shl x, C0), (lshr y, C1)) -> (fshl x, y, C0) or (fshr x, y, C1) - int64_t CstShlAmt, CstLShrAmt; + int64_t CstShlAmt = 0, CstLShrAmt; if (mi_match(ShlAmt, MRI, m_ICstOrSplat(CstShlAmt)) && mi_match(LShrAmt, MRI, m_ICstOrSplat(CstLShrAmt)) && CstShlAmt + CstLShrAmt == BitWidth) { FshOpc = TargetOpcode::G_FSHR; Amt = LShrAmt; - } else if (mi_match(LShrAmt, MRI, m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) && ShlAmt == Amt) { // (or (shl x, amt), (lshr y, (sub bw, amt))) -> (fshl x, y, amt) FshOpc = TargetOpcode::G_FSHL; - } else if (mi_match(ShlAmt, MRI, m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) && LShrAmt == Amt) { // (or (shl x, (sub bw, amt)), (lshr y, amt)) -> (fshr x, y, amt) FshOpc = TargetOpcode::G_FSHR; - } else { return false; } LLT AmtTy = MRI.getType(Amt); - if (!isLegalOrBeforeLegalizer({FshOpc, {Ty, AmtTy}})) + if (!isLegalOrBeforeLegalizer({FshOpc, {Ty, AmtTy}}) && + (!AllowScalarConstants || CstShlAmt == 0 || !Ty.isScalar())) return false; MatchInfo = [=](MachineIRBuilder &B) { diff --git a/llvm/lib/DebugInfo/GSYM/GsymCreator.cpp b/llvm/lib/DebugInfo/GSYM/GsymCreator.cpp index 93ff3b9..d87cb4d 100644 --- a/llvm/lib/DebugInfo/GSYM/GsymCreator.cpp +++ b/llvm/lib/DebugInfo/GSYM/GsymCreator.cpp @@ -552,7 +552,7 @@ llvm::Error GsymCreator::saveSegments(StringRef Path, createSegment(SegmentSize, FuncIdx); if (ExpectedGC) { GsymCreator *GC = ExpectedGC->get(); - if (GC == NULL) + if (!GC) break; // We had no more functions to encode. 
// Don't collect any messages at all OutputAggregator Out(nullptr); diff --git a/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderPerf.cpp b/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderPerf.cpp index 1a61d31..e609a7d 100644 --- a/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderPerf.cpp +++ b/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderPerf.cpp @@ -55,7 +55,7 @@ struct PerfState { std::unique_ptr<raw_fd_ostream> Dumpstream; // perf mmap marker - void *MarkerAddr = NULL; + void *MarkerAddr = nullptr; }; // prevent concurrent dumps from messing up the output file diff --git a/llvm/lib/SandboxIR/Context.cpp b/llvm/lib/SandboxIR/Context.cpp index 70ac68a..fb6ff62 100644 --- a/llvm/lib/SandboxIR/Context.cpp +++ b/llvm/lib/SandboxIR/Context.cpp @@ -443,7 +443,7 @@ Argument *Context::getOrCreateArgument(llvm::Argument *LLVMArg) { } Constant *Context::getOrCreateConstant(llvm::Constant *LLVMC) { - return cast<Constant>(getOrCreateValueInternal(LLVMC, 0)); + return cast<Constant>(getOrCreateValueInternal(LLVMC, nullptr)); } BasicBlock *Context::createBasicBlock(llvm::BasicBlock *LLVMBB) { diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td index b3ec65c..2783147 100644 --- a/llvm/lib/Target/AArch64/AArch64Combine.td +++ b/llvm/lib/Target/AArch64/AArch64Combine.td @@ -366,6 +366,7 @@ def AArch64PostLegalizerCombiner select_to_minmax, or_to_bsp, combine_concat_vector, commute_constant_to_rhs, extract_vec_elt_combines, push_freeze_to_prevent_poison_from_propagating, - combine_mul_cmlt, combine_use_vector_truncate, - extmultomull, truncsat_combines, lshr_of_trunc_of_lshr]> { + combine_mul_cmlt, combine_use_vector_truncate, + extmultomull, truncsat_combines, lshr_of_trunc_of_lshr, + funnel_shift_from_or_shift_constants_are_legal]> { } diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h index ce2b4a5..cd8b249 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPU.h +++ b/llvm/lib/Target/AMDGPU/AMDGPU.h @@ -562,9 +562,13 @@ public: void initializeAMDGPURewriteAGPRCopyMFMALegacyPass(PassRegistry &); extern char &AMDGPURewriteAGPRCopyMFMALegacyID; +void initializeAMDGPUUniformIntrinsicCombineLegacyPass(PassRegistry &); +extern char &AMDGPUUniformIntrinsicCombineLegacyPassID; +FunctionPass *createAMDGPUUniformIntrinsicCombineLegacyPass(); + struct AMDGPUUniformIntrinsicCombinePass : public PassInfoMixin<AMDGPUUniformIntrinsicCombinePass> { - PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM); + PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); }; namespace AMDGPU { diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h index 0eb00cb..529da8d 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h @@ -50,6 +50,7 @@ const D16ImageDimIntrinsic *lookupD16ImageDimIntrinsic(unsigned Intr); struct ImageDimIntrinsicInfo { unsigned Intr; unsigned BaseOpcode; + unsigned AtomicNoRetBaseOpcode; MIMGDim Dim; uint8_t NumOffsetArgs; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp index 97c2c9c..9ce1224 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp @@ -2006,19 +2006,27 @@ bool AMDGPUInstructionSelector::selectImageIntrinsic( MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const { MachineBasicBlock *MBB = MI.getParent(); const DebugLoc &DL = MI.getDebugLoc(); + unsigned 
IntrOpcode = Intr->BaseOpcode; + + // For image atomic: use no-return opcode if result is unused. + if (Intr->AtomicNoRetBaseOpcode != Intr->BaseOpcode) { + Register ResultDef = MI.getOperand(0).getReg(); + if (MRI->use_nodbg_empty(ResultDef)) + IntrOpcode = Intr->AtomicNoRetBaseOpcode; + } const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = - AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); + AMDGPU::getMIMGBaseOpcodeInfo(IntrOpcode); const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); - unsigned IntrOpcode = Intr->BaseOpcode; const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI); const bool IsGFX11Plus = AMDGPU::isGFX11Plus(STI); const bool IsGFX12Plus = AMDGPU::isGFX12Plus(STI); const unsigned ArgOffset = MI.getNumExplicitDefs() + 1; - Register VDataIn, VDataOut; + Register VDataIn = AMDGPU::NoRegister; + Register VDataOut = AMDGPU::NoRegister; LLT VDataTy; int NumVDataDwords = -1; bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 || @@ -2049,7 +2057,8 @@ bool AMDGPUInstructionSelector::selectImageIntrinsic( unsigned DMaskLanes = 0; if (BaseOpcode->Atomic) { - VDataOut = MI.getOperand(0).getReg(); + if (!BaseOpcode->NoReturn) + VDataOut = MI.getOperand(0).getReg(); VDataIn = MI.getOperand(2).getReg(); LLT Ty = MRI->getType(VDataIn); @@ -2099,8 +2108,9 @@ bool AMDGPUInstructionSelector::selectImageIntrinsic( assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this"); unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(); - if (BaseOpcode->Atomic) - CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization + // Keep GLC only when the atomic's result is actually used. + if (BaseOpcode->Atomic && !BaseOpcode->NoReturn) + CPol |= AMDGPU::CPol::GLC; if (CPol & ~((IsGFX12Plus ? AMDGPU::CPol::ALL : AMDGPU::CPol::ALL_pregfx12) | AMDGPU::CPol::VOLATILE)) return false; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def index a6074ea..bf6f1a9 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def +++ b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def @@ -30,7 +30,6 @@ MODULE_PASS("amdgpu-preload-kernel-arguments", AMDGPUPreloadKernelArgumentsPass( MODULE_PASS("amdgpu-printf-runtime-binding", AMDGPUPrintfRuntimeBindingPass()) MODULE_PASS("amdgpu-remove-incompatible-functions", AMDGPURemoveIncompatibleFunctionsPass(*this)) MODULE_PASS("amdgpu-sw-lower-lds", AMDGPUSwLowerLDSPass(*this)) -MODULE_PASS("amdgpu-uniform-intrinsic-combine", AMDGPUUniformIntrinsicCombinePass()) #undef MODULE_PASS #ifndef MODULE_PASS_WITH_PARAMS @@ -69,6 +68,7 @@ FUNCTION_PASS("amdgpu-unify-divergent-exit-nodes", AMDGPUUnifyDivergentExitNodesPass()) FUNCTION_PASS("amdgpu-usenative", AMDGPUUseNativeCallsPass()) FUNCTION_PASS("si-annotate-control-flow", SIAnnotateControlFlowPass(*static_cast<const GCNTargetMachine *>(this))) +FUNCTION_PASS("amdgpu-uniform-intrinsic-combine", AMDGPUUniformIntrinsicCombinePass()) #undef FUNCTION_PASS #ifndef FUNCTION_ANALYSIS diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp index 6214f4d..75a94ac 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp @@ -619,6 +619,7 @@ extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() { initializeAMDGPUPreloadKernArgPrologLegacyPass(*PR); initializeAMDGPUWaitSGPRHazardsLegacyPass(*PR); initializeAMDGPUPreloadKernelArgumentsLegacyPass(*PR); + initializeAMDGPUUniformIntrinsicCombineLegacyPass(*PR); } 
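// The uniform-intrinsic combine is now registered as a function pass in
// AMDGPUPassRegistry.def, so it can be exercised in isolation (assumed
// invocation, for illustration; in.ll is a placeholder input):
//   opt -mtriple=amdgcn-- -passes=amdgpu-uniform-intrinsic-combine -S in.ll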
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -887,9 +888,6 @@ void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
        if (EarlyInlineAll && !EnableFunctionCalls)
          PM.addPass(AMDGPUAlwaysInlinePass());
-
-        if (EnableUniformIntrinsicCombine)
-          PM.addPass(AMDGPUUniformIntrinsicCombinePass());
      });

  PB.registerPeepholeEPCallback(
@@ -900,6 +898,9 @@ void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
          FPM.addPass(AMDGPUUseNativeCallsPass());
        if (EnableLibCallSimplify)
          FPM.addPass(AMDGPUSimplifyLibCallsPass());
+
+        if (EnableUniformIntrinsicCombine)
+          FPM.addPass(AMDGPUUniformIntrinsicCombinePass());
      });

  PB.registerCGSCCOptimizerLateEPCallback(
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 50c78d8..65e6ed9 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -16,12 +16,6 @@
 /// uniformity. And every instruction that's downstream and cares about dynamic
 /// uniformity must be convergent (and isel will introduce v_readfirstlane for
 /// them if their operands can't be proven statically uniform).
-///
-/// This pass is implemented as a ModulePass because intrinsic declarations
-/// exist at the module scope, allowing us to skip processing entirely if no
-/// declarations are present and to traverse their user lists directly when
-/// they are. A FunctionPass would instead require scanning every instruction
-/// in every function to find relevant intrinsics, which is far less efficient.
 //===----------------------------------------------------------------------===//

 #include "AMDGPU.h"
@@ -97,14 +91,12 @@ static bool optimizeUniformIntrinsic(IntrinsicInst &II,
       Tracker[NotOp] = true; // NOT preserves uniformity
       LLVM_DEBUG(dbgs() << "Replacing ICMP_EQ: " << *NotOp << '\n');
       ICmp->replaceAllUsesWith(NotOp);
-      ICmp->eraseFromParent();
       Changed = true;
     } else if (Pred == ICmpInst::ICMP_NE && match(OtherOp, m_Zero())) {
       // Case: (icmp ne %ballot, 0) -> %ballot_arg
       LLVM_DEBUG(dbgs() << "Replacing ICMP_NE with ballot argument: " << *Src
                         << '\n');
       ICmp->replaceAllUsesWith(Src);
-      ICmp->eraseFromParent();
       Changed = true;
     }
   }
@@ -120,15 +112,17 @@ static bool optimizeUniformIntrinsic(IntrinsicInst &II,
   return false;
 }

-/// Iterates over intrinsic declarations in the module to optimize their uses.
+/// Iterates over the intrinsic calls in the function and optimizes them.
+static bool runUniformIntrinsicCombine(Function &F, const UniformityInfo &UI) { bool IsChanged = false; ValueMap<const Value *, bool> Tracker; - FunctionAnalysisManager &FAM = - AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); - for (Function &F : M) { - switch (F.getIntrinsicID()) { + for (Instruction &I : make_early_inc_range(instructions(F))) { + auto *II = dyn_cast<IntrinsicInst>(&I); + if (!II) + continue; + + switch (II->getIntrinsicID()) { case Intrinsic::amdgcn_permlane64: case Intrinsic::amdgcn_readfirstlane: case Intrinsic::amdgcn_readlane: @@ -137,23 +131,61 @@ static bool runUniformIntrinsicCombine(Module &M, ModuleAnalysisManager &AM) { default: continue; } - - for (User *U : make_early_inc_range(F.users())) { - auto *II = cast<IntrinsicInst>(U); - Function *ParentF = II->getFunction(); - const auto &UI = FAM.getResult<UniformityInfoAnalysis>(*ParentF); - IsChanged |= optimizeUniformIntrinsic(*II, UI, Tracker); - } + IsChanged |= optimizeUniformIntrinsic(*II, UI, Tracker); } return IsChanged; } PreservedAnalyses -AMDGPUUniformIntrinsicCombinePass::run(Module &M, ModuleAnalysisManager &AM) { - if (!runUniformIntrinsicCombine(M, AM)) +AMDGPUUniformIntrinsicCombinePass::run(Function &F, + FunctionAnalysisManager &AM) { + const auto &UI = AM.getResult<UniformityInfoAnalysis>(F); + if (!runUniformIntrinsicCombine(F, UI)) return PreservedAnalyses::all(); PreservedAnalyses PA; PA.preserve<UniformityInfoAnalysis>(); return PA; } + +namespace { +class AMDGPUUniformIntrinsicCombineLegacy : public FunctionPass { +public: + static char ID; + AMDGPUUniformIntrinsicCombineLegacy() : FunctionPass(ID) { + initializeAMDGPUUniformIntrinsicCombineLegacyPass( + *PassRegistry::getPassRegistry()); + } + +private: + bool runOnFunction(Function &F) override; + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.setPreservesCFG(); + AU.addRequired<UniformityInfoWrapperPass>(); + AU.addRequired<TargetPassConfig>(); + } +}; +} // namespace + +char AMDGPUUniformIntrinsicCombineLegacy::ID = 0; +char &llvm::AMDGPUUniformIntrinsicCombineLegacyPassID = + AMDGPUUniformIntrinsicCombineLegacy::ID; + +bool AMDGPUUniformIntrinsicCombineLegacy::runOnFunction(Function &F) { + if (skipFunction(F)) + return false; + const UniformityInfo &UI = + getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo(); + return runUniformIntrinsicCombine(F, UI); +} + +INITIALIZE_PASS_BEGIN(AMDGPUUniformIntrinsicCombineLegacy, DEBUG_TYPE, + "AMDGPU Uniform Intrinsic Combine", false, false) +INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass) +INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) +INITIALIZE_PASS_END(AMDGPUUniformIntrinsicCombineLegacy, DEBUG_TYPE, + "AMDGPU Uniform Intrinsic Combine", false, false) + +FunctionPass *llvm::createAMDGPUUniformIntrinsicCombineLegacyPass() { + return new AMDGPUUniformIntrinsicCombineLegacy(); +} diff --git a/llvm/lib/Target/AMDGPU/MIMGInstructions.td b/llvm/lib/Target/AMDGPU/MIMGInstructions.td index 5f6d742..d950131 100644 --- a/llvm/lib/Target/AMDGPU/MIMGInstructions.td +++ b/llvm/lib/Target/AMDGPU/MIMGInstructions.td @@ -877,69 +877,69 @@ multiclass MIMG_Store <mimgopc op, string asm, bit has_d16, bit mip = 0> { } class MIMG_Atomic_gfx6789_base <bits<8> op, string asm, RegisterOperand data_rc, - RegisterClass addr_rc, string dns=""> - : MIMG_gfx6789 <op, (outs data_rc:$vdst), dns> { - let Constraints = "$vdst = $vdata"; - + RegisterClass addr_rc, bit noRtn, string dns=""> + : MIMG_gfx6789 <op, !if(noRtn, (outs), (outs data_rc:$vdst)), dns> { + let Constraints = 
!if(noRtn, "", "$vdst = $vdata"); + let isCodeGenOnly = noRtn; let InOperandList = (ins data_rc:$vdata, addr_rc:$vaddr, SReg_256_XNULL:$srsrc, DMask:$dmask, UNorm:$unorm, CPol:$cpol, R128A16:$r128, TFE:$tfe, LWE:$lwe, DA:$da); - let AsmString = asm#" $vdst, $vaddr, $srsrc$dmask$unorm$cpol$r128$tfe$lwe$da"; + let AsmString = asm#" $vdata, $vaddr, $srsrc$dmask$unorm$cpol$r128$tfe$lwe$da"; } class MIMG_Atomic_gfx90a_base <bits<8> op, string asm, RegisterOperand data_rc, - RegisterClass addr_rc, string dns=""> - : MIMG_gfx90a <op, (outs getAlign2RegOp<data_rc>.ret:$vdst), dns> { - let Constraints = "$vdst = $vdata"; - + RegisterClass addr_rc, bit noRtn, string dns=""> + : MIMG_gfx90a <op, !if(noRtn, (outs), (outs getAlign2RegOp<data_rc>.ret:$vdst)), dns> { + let Constraints = !if(noRtn, "", "$vdst = $vdata"); + let isCodeGenOnly = noRtn; let InOperandList = (ins getAlign2RegOp<data_rc>.ret:$vdata, addr_rc:$vaddr, SReg_256_XNULL:$srsrc, DMask:$dmask, UNorm:$unorm, CPol:$cpol, R128A16:$r128, LWE:$lwe, DA:$da); - let AsmString = asm#" $vdst, $vaddr, $srsrc$dmask$unorm$cpol$r128$lwe$da"; + let AsmString = asm#" $vdata, $vaddr, $srsrc$dmask$unorm$cpol$r128$lwe$da"; } class MIMG_Atomic_si<mimgopc op, string asm, RegisterOperand data_rc, - RegisterClass addr_rc, bit enableDasm = 0> - : MIMG_Atomic_gfx6789_base<op.SI, asm, data_rc, addr_rc, + RegisterClass addr_rc, bit noRtn = 0, bit enableDasm = 0> + : MIMG_Atomic_gfx6789_base<op.SI, asm, data_rc, addr_rc, noRtn, !if(enableDasm, "GFX6GFX7", "")> { let AssemblerPredicate = isGFX6GFX7; } class MIMG_Atomic_vi<mimgopc op, string asm, RegisterOperand data_rc, - RegisterClass addr_rc, bit enableDasm = 0> - : MIMG_Atomic_gfx6789_base<op.VI, asm, data_rc, addr_rc, !if(enableDasm, "GFX8", "")> { + RegisterClass addr_rc, bit noRtn = 0, bit enableDasm = 0> + : MIMG_Atomic_gfx6789_base<op.VI, asm, data_rc, addr_rc, noRtn, !if(enableDasm, "GFX8", "")> { let AssemblerPredicate = isGFX8GFX9NotGFX90A; let MIMGEncoding = MIMGEncGfx8; } class MIMG_Atomic_gfx90a<mimgopc op, string asm, RegisterOperand data_rc, - RegisterClass addr_rc, bit enableDasm = 0> - : MIMG_Atomic_gfx90a_base<op.VI, asm, data_rc, addr_rc, !if(enableDasm, "GFX90A", "")> { + RegisterClass addr_rc, bit noRtn = 0, bit enableDasm = 0> + : MIMG_Atomic_gfx90a_base<op.VI, asm, data_rc, addr_rc, noRtn, !if(enableDasm, "GFX90A", "")> { let AssemblerPredicate = isGFX90APlus; let MIMGEncoding = MIMGEncGfx90a; } class MIMG_Atomic_gfx10<mimgopc op, string opcode, RegisterOperand DataRC, RegisterClass AddrRC, - bit enableDisasm = 0> - : MIMG_gfx10<op.GFX10M, (outs DataRC:$vdst), + bit noRtn = 0, bit enableDisasm = 0> + : MIMG_gfx10<op.GFX10M, !if(noRtn, (outs), (outs DataRC:$vdst)), !if(enableDisasm, "GFX10", "")> { - let Constraints = "$vdst = $vdata"; - + let Constraints = !if(noRtn, "", "$vdst = $vdata"); + let isCodeGenOnly = noRtn; let InOperandList = (ins DataRC:$vdata, AddrRC:$vaddr0, SReg_256_XNULL:$srsrc, DMask:$dmask, Dim:$dim, UNorm:$unorm, CPol:$cpol, R128A16:$r128, A16:$a16, TFE:$tfe, LWE:$lwe); - let AsmString = opcode#" $vdst, $vaddr0, $srsrc$dmask$dim$unorm$cpol$r128$a16$tfe$lwe"; + let AsmString = opcode#" $vdata, $vaddr0, $srsrc$dmask$dim$unorm$cpol$r128$a16$tfe$lwe"; } class MIMG_Atomic_nsa_gfx10<mimgopc op, string opcode, RegisterOperand DataRC, int num_addrs, - bit enableDisasm = 0> - : MIMG_nsa_gfx10<op.GFX10M, (outs DataRC:$vdst), num_addrs, + bit noRtn = 0, bit enableDisasm = 0> + : MIMG_nsa_gfx10<op.GFX10M, !if(noRtn, (outs), (outs DataRC:$vdst)), num_addrs, !if(enableDisasm, "GFX10", 
"")> { - let Constraints = "$vdst = $vdata"; - + let Constraints = !if(noRtn, "", "$vdst = $vdata"); + let isCodeGenOnly = noRtn; let InOperandList = !con((ins DataRC:$vdata), AddrIns, (ins SReg_256_XNULL:$srsrc, DMask:$dmask, @@ -950,24 +950,24 @@ class MIMG_Atomic_nsa_gfx10<mimgopc op, string opcode, class MIMG_Atomic_gfx11<mimgopc op, string opcode, RegisterOperand DataRC, RegisterClass AddrRC, - bit enableDisasm = 0> - : MIMG_gfx11<op.GFX11, (outs DataRC:$vdst), + bit noRtn = 0, bit enableDisasm = 0> + : MIMG_gfx11<op.GFX11, !if(noRtn, (outs), (outs DataRC:$vdst)), !if(enableDisasm, "GFX11", "")> { - let Constraints = "$vdst = $vdata"; - + let Constraints = !if(noRtn, "", "$vdst = $vdata"); + let isCodeGenOnly = noRtn; let InOperandList = (ins DataRC:$vdata, AddrRC:$vaddr0, SReg_256_XNULL:$srsrc, DMask:$dmask, Dim:$dim, UNorm:$unorm, CPol:$cpol, R128A16:$r128, A16:$a16, TFE:$tfe, LWE:$lwe); - let AsmString = opcode#" $vdst, $vaddr0, $srsrc$dmask$dim$unorm$cpol$r128$a16$tfe$lwe"; + let AsmString = opcode#" $vdata, $vaddr0, $srsrc$dmask$dim$unorm$cpol$r128$a16$tfe$lwe"; } class MIMG_Atomic_nsa_gfx11<mimgopc op, string opcode, RegisterOperand DataRC, int num_addrs, - bit enableDisasm = 0> - : MIMG_nsa_gfx11<op.GFX11, (outs DataRC:$vdst), num_addrs, + bit noRtn = 0, bit enableDisasm = 0> + : MIMG_nsa_gfx11<op.GFX11, !if(noRtn, (outs), (outs DataRC:$vdst)), num_addrs, !if(enableDisasm, "GFX11", "")> { - let Constraints = "$vdst = $vdata"; - + let Constraints = !if(noRtn, "", "$vdst = $vdata"); + let isCodeGenOnly = noRtn; let InOperandList = !con((ins DataRC:$vdata), AddrIns, (ins SReg_256_XNULL:$srsrc, DMask:$dmask, @@ -977,11 +977,11 @@ class MIMG_Atomic_nsa_gfx11<mimgopc op, string opcode, } class VIMAGE_Atomic_gfx12<mimgopc op, string opcode, RegisterOperand DataRC, - int num_addrs, string renamed, bit enableDisasm = 0> - : VIMAGE_gfx12<op.GFX12, (outs DataRC:$vdst), num_addrs, + int num_addrs, string renamed, bit noRtn = 0, bit enableDisasm = 0> + : VIMAGE_gfx12<op.GFX12, !if(noRtn, (outs), (outs DataRC:$vdst)), num_addrs, !if(enableDisasm, "GFX12", "")> { - let Constraints = "$vdst = $vdata"; - + let Constraints = !if(noRtn, "", "$vdst = $vdata"); + let isCodeGenOnly = noRtn; let InOperandList = !con((ins DataRC:$vdata), AddrIns, (ins SReg_256_XNULL:$rsrc, DMask:$dmask, Dim:$dim, @@ -994,95 +994,96 @@ multiclass MIMG_Atomic_Addr_Helper_m <mimgopc op, string asm, RegisterOperand data_rc, bit enableDasm = 0, bit isFP = 0, + bit noRtn = 0, string renamed = ""> { let hasSideEffects = 1, // FIXME: remove this mayLoad = 1, mayStore = 1, hasPostISelHook = 0, DisableWQM = 1, - FPAtomic = isFP in { + FPAtomic = isFP, IsAtomicNoRet = noRtn in { let VAddrDwords = 1 in { let ssamp = 0 in { if op.HAS_SI then { - def _V1_si : MIMG_Atomic_si <op, asm, data_rc, VGPR_32, enableDasm>; + def _V1_si : MIMG_Atomic_si <op, asm, data_rc, VGPR_32, noRtn, enableDasm>; } if op.HAS_VI then { - def _V1_vi : MIMG_Atomic_vi <op, asm, data_rc, VGPR_32, enableDasm>; + def _V1_vi : MIMG_Atomic_vi <op, asm, data_rc, VGPR_32, noRtn, enableDasm>; let hasPostISelHook = 1 in - def _V1_gfx90a : MIMG_Atomic_gfx90a <op, asm, data_rc, VGPR_32, enableDasm>; + def _V1_gfx90a : MIMG_Atomic_gfx90a <op, asm, data_rc, VGPR_32, noRtn, enableDasm>; } if op.HAS_GFX10M then { - def _V1_gfx10 : MIMG_Atomic_gfx10 <op, asm, data_rc, VGPR_32, enableDasm>; + def _V1_gfx10 : MIMG_Atomic_gfx10 <op, asm, data_rc, VGPR_32, noRtn, enableDasm>; } if op.HAS_GFX11 then { - def _V1_gfx11 : MIMG_Atomic_gfx11 <op, asm, data_rc, VGPR_32, enableDasm>; + 
def _V1_gfx11 : MIMG_Atomic_gfx11 <op, asm, data_rc, VGPR_32, noRtn, enableDasm>; } } if op.HAS_GFX12 then { - def _V1_gfx12 : VIMAGE_Atomic_gfx12 <op, asm, data_rc, 1, renamed>; + def _V1_gfx12 : VIMAGE_Atomic_gfx12 <op, asm, data_rc, 1, renamed, noRtn>; } } let VAddrDwords = 2 in { let ssamp = 0 in { if op.HAS_SI then { - def _V2_si : MIMG_Atomic_si <op, asm, data_rc, VReg_64, 0>; + def _V2_si : MIMG_Atomic_si <op, asm, data_rc, VReg_64, noRtn, 0>; } if op.HAS_VI then { - def _V2_vi : MIMG_Atomic_vi <op, asm, data_rc, VReg_64, 0>; - def _V2_gfx90a : MIMG_Atomic_gfx90a <op, asm, data_rc, VReg_64_Align2, 0>; + def _V2_vi : MIMG_Atomic_vi <op, asm, data_rc, VReg_64, noRtn, 0>; + def _V2_gfx90a : MIMG_Atomic_gfx90a <op, asm, data_rc, VReg_64_Align2, noRtn, 0>; } if op.HAS_GFX10M then { - def _V2_gfx10 : MIMG_Atomic_gfx10 <op, asm, data_rc, VReg_64, 0>; - def _V2_nsa_gfx10 : MIMG_Atomic_nsa_gfx10 <op, asm, data_rc, 2, 0>; + def _V2_gfx10 : MIMG_Atomic_gfx10 <op, asm, data_rc, VReg_64, noRtn, 0>; + def _V2_nsa_gfx10 : MIMG_Atomic_nsa_gfx10 <op, asm, data_rc, 2, noRtn, 0>; } if op.HAS_GFX11 then { - def _V2_gfx11 : MIMG_Atomic_gfx11 <op, asm, data_rc, VReg_64, 0>; - def _V2_nsa_gfx11 : MIMG_Atomic_nsa_gfx11 <op, asm, data_rc, 2, 0>; + def _V2_gfx11 : MIMG_Atomic_gfx11 <op, asm, data_rc, VReg_64, noRtn, 0>; + def _V2_nsa_gfx11 : MIMG_Atomic_nsa_gfx11 <op, asm, data_rc, 2, noRtn, 0>; } } if op.HAS_GFX12 then { - def _V2_gfx12 : VIMAGE_Atomic_gfx12 <op, asm, data_rc, 2, renamed>; + def _V2_gfx12 : VIMAGE_Atomic_gfx12 <op, asm, data_rc, 2, renamed, noRtn>; } } let VAddrDwords = 3 in { let ssamp = 0 in { if op.HAS_SI then { - def _V3_si : MIMG_Atomic_si <op, asm, data_rc, VReg_96, 0>; + def _V3_si : MIMG_Atomic_si <op, asm, data_rc, VReg_96, noRtn, 0>; } if op.HAS_VI then { - def _V3_vi : MIMG_Atomic_vi <op, asm, data_rc, VReg_96, 0>; - def _V3_gfx90a : MIMG_Atomic_gfx90a <op, asm, data_rc, VReg_96_Align2, 0>; + def _V3_vi : MIMG_Atomic_vi <op, asm, data_rc, VReg_96, noRtn, 0>; + def _V3_gfx90a : MIMG_Atomic_gfx90a <op, asm, data_rc, VReg_96_Align2, noRtn, 0>; } if op.HAS_GFX10M then { - def _V3_gfx10 : MIMG_Atomic_gfx10 <op, asm, data_rc, VReg_96, 0>; - def _V3_nsa_gfx10 : MIMG_Atomic_nsa_gfx10 <op, asm, data_rc, 3, 0>; + def _V3_gfx10 : MIMG_Atomic_gfx10 <op, asm, data_rc, VReg_96, noRtn, 0>; + def _V3_nsa_gfx10 : MIMG_Atomic_nsa_gfx10 <op, asm, data_rc, 3, noRtn, 0>; } if op.HAS_GFX11 then { - def _V3_gfx11 : MIMG_Atomic_gfx11 <op, asm, data_rc, VReg_96, 0>; - def _V3_nsa_gfx11 : MIMG_Atomic_nsa_gfx11 <op, asm, data_rc, 3, 0>; + def _V3_gfx11 : MIMG_Atomic_gfx11 <op, asm, data_rc, VReg_96, noRtn, 0>; + def _V3_nsa_gfx11 : MIMG_Atomic_nsa_gfx11 <op, asm, data_rc, 3, noRtn, 0>; } } if op.HAS_GFX12 then { - def _V3_gfx12 : VIMAGE_Atomic_gfx12 <op, asm, data_rc, 3, renamed>; + def _V3_gfx12 : VIMAGE_Atomic_gfx12 <op, asm, data_rc, 3, renamed, noRtn>; } } let VAddrDwords = 4 in { let ssamp = 0 in { if op.HAS_SI then { - def _V4_si : MIMG_Atomic_si <op, asm, data_rc, VReg_128, 0>; + def _V4_si : MIMG_Atomic_si <op, asm, data_rc, VReg_128, noRtn, 0>; } if op.HAS_VI then { - def _V4_vi : MIMG_Atomic_vi <op, asm, data_rc, VReg_128, 0>; - def _V4_gfx90a : MIMG_Atomic_gfx90a <op, asm, data_rc, VReg_128_Align2, 0>; + def _V4_vi : MIMG_Atomic_vi <op, asm, data_rc, VReg_128, noRtn, 0>; + def _V4_gfx90a : MIMG_Atomic_gfx90a <op, asm, data_rc, VReg_128_Align2, noRtn, 0>; } if op.HAS_GFX10M then { - def _V4_gfx10 : MIMG_Atomic_gfx10 <op, asm, data_rc, VReg_128, 0>; - def _V4_nsa_gfx10 : MIMG_Atomic_nsa_gfx10 <op, 
asm, data_rc, 4, enableDasm>; + def _V4_gfx10 : MIMG_Atomic_gfx10 <op, asm, data_rc, VReg_128, noRtn, 0>; + def _V4_nsa_gfx10 : MIMG_Atomic_nsa_gfx10 <op, asm, data_rc, 4, noRtn, enableDasm>; } if op.HAS_GFX11 then { - def _V4_gfx11 : MIMG_Atomic_gfx11 <op, asm, data_rc, VReg_128, 0>; - def _V4_nsa_gfx11 : MIMG_Atomic_nsa_gfx11 <op, asm, data_rc, 4, enableDasm>; + def _V4_gfx11 : MIMG_Atomic_gfx11 <op, asm, data_rc, VReg_128, noRtn, 0>; + def _V4_nsa_gfx11 : MIMG_Atomic_nsa_gfx11 <op, asm, data_rc, 4, noRtn, enableDasm>; } } if op.HAS_GFX12 then { - def _V4_gfx12 : VIMAGE_Atomic_gfx12 <op, asm, data_rc, 4, renamed, enableDasm>; + def _V4_gfx12 : VIMAGE_Atomic_gfx12 <op, asm, data_rc, 4, renamed, noRtn, enableDasm>; } } } @@ -1095,12 +1096,13 @@ multiclass MIMG_Atomic_Addr_Helper_m <mimgopc op, string asm, } } -multiclass MIMG_Atomic <mimgopc op, string asm, bit isCmpSwap = 0, bit isFP = 0, - string renamed = ""> { // 64-bit atomics - let IsAtomicRet = 1 in { +multiclass MIMG_Atomic_Base <mimgopc op, string asm, bit isCmpSwap = 0, bit isFP = 0, + bit noRtn = 0, string renamed = ""> { // 64-bit atomics + let IsAtomicRet = !not(noRtn) in { def "" : MIMGBaseOpcode { let Atomic = 1; let AtomicX2 = isCmpSwap; + let NoReturn = noRtn; } let BaseOpcode = !cast<MIMGBaseOpcode>(NAME) in { @@ -1109,22 +1111,28 @@ multiclass MIMG_Atomic <mimgopc op, string asm, bit isCmpSwap = 0, bit isFP = 0, // Other variants are reconstructed by disassembler using dmask and tfe. if !not(isCmpSwap) then { let VDataDwords = 1 in - defm _V1 : MIMG_Atomic_Addr_Helper_m <op, asm, AVLdSt_32, 1, isFP, renamed>; + defm _V1 : MIMG_Atomic_Addr_Helper_m <op, asm, AVLdSt_32, 1, isFP, noRtn, renamed>; } let VDataDwords = 2 in - defm _V2 : MIMG_Atomic_Addr_Helper_m <op, asm, AVLdSt_64, isCmpSwap, isFP, renamed>; + defm _V2 : MIMG_Atomic_Addr_Helper_m <op, asm, AVLdSt_64, isCmpSwap, isFP, noRtn, renamed>; let VDataDwords = 3 in - defm _V3 : MIMG_Atomic_Addr_Helper_m <op, asm, AVLdSt_96, 0, isFP, renamed>; + defm _V3 : MIMG_Atomic_Addr_Helper_m <op, asm, AVLdSt_96, 0, isFP, noRtn, renamed>; if isCmpSwap then { let VDataDwords = 4 in - defm _V4 : MIMG_Atomic_Addr_Helper_m <op, asm, AVLdSt_128, 0, isFP, renamed>; + defm _V4 : MIMG_Atomic_Addr_Helper_m <op, asm, AVLdSt_128, 0, isFP, noRtn, renamed>; let VDataDwords = 5 in - defm _V5 : MIMG_Atomic_Addr_Helper_m <op, asm, AVLdSt_160, 0, isFP, renamed>; + defm _V5 : MIMG_Atomic_Addr_Helper_m <op, asm, AVLdSt_160, 0, isFP, noRtn, renamed>; } } - } // End IsAtomicRet = 1 + } +} + +multiclass MIMG_Atomic <mimgopc op, string asm, bit isCmpSwap = 0, bit isFP = 0, + string renamed = ""> { + defm "" : MIMG_Atomic_Base <op, asm, isCmpSwap, isFP, /*noRtn=*/0, renamed>; + defm "_NORTN" : MIMG_Atomic_Base <op, asm, isCmpSwap, isFP, /*noRtn=*/1, renamed>; } multiclass MIMG_Atomic_Renamed <mimgopc op, string asm, string renamed, @@ -1820,6 +1828,7 @@ let SubtargetPredicate = isGFX12Plus in { class ImageDimIntrinsicInfo<AMDGPUImageDimIntrinsic I> { Intrinsic Intr = I; MIMGBaseOpcode BaseOpcode = !cast<MIMGBaseOpcode>(!strconcat("IMAGE_", I.P.OpMod)); + MIMGBaseOpcode AtomicNoRetBaseOpcode = BaseOpcode; AMDGPUDimProps Dim = I.P.Dim; AMDGPUImageDimIntrinsicEval DimEval = AMDGPUImageDimIntrinsicEval<I.P>; @@ -1855,13 +1864,20 @@ class ImageDimIntrinsicInfo<AMDGPUImageDimIntrinsic I> { bits<8> CoordTyArg = !add(GradientTyArg, !if(I.P.Gradients, 1, 0)); } +class ImageDimAtomicIntrinsicInfo<AMDGPUImageDimIntrinsic I> + : ImageDimIntrinsicInfo<I> { + MIMGBaseOpcode AtomicNoRetBaseOpcode = + 
!cast<MIMGBaseOpcode>(!strconcat("IMAGE_", I.P.OpMod, "_NORTN")); +} + def ImageDimIntrinsicTable : GenericTable { let FilterClass = "ImageDimIntrinsicInfo"; - let Fields = ["Intr", "BaseOpcode", "Dim", "NumOffsetArgs", "NumBiasArgs", "NumZCompareArgs", "NumGradients", "NumDmask", "NumData", "NumVAddrs", "NumArgs", - "DMaskIndex", "VAddrStart", "OffsetIndex", "BiasIndex", "ZCompareIndex", "GradientStart", "CoordStart", "LodIndex", "MipIndex", "VAddrEnd", - "RsrcIndex", "SampIndex", "UnormIndex", "TexFailCtrlIndex", "CachePolicyIndex", + let Fields = ["Intr", "BaseOpcode", "AtomicNoRetBaseOpcode", "Dim", "NumOffsetArgs", "NumBiasArgs", "NumZCompareArgs", "NumGradients", "NumDmask", "NumData", + "NumVAddrs", "NumArgs", "DMaskIndex", "VAddrStart", "OffsetIndex", "BiasIndex", "ZCompareIndex", "GradientStart", "CoordStart", "LodIndex", "MipIndex", + "VAddrEnd", "RsrcIndex", "SampIndex", "UnormIndex", "TexFailCtrlIndex", "CachePolicyIndex", "BiasTyArg", "GradientTyArg", "CoordTyArg"]; string TypeOf_BaseOpcode = "MIMGBaseOpcode"; + string TypeOf_AtomicNoRetBaseOpcode = "MIMGBaseOpcode"; string TypeOf_Dim = "MIMGDim"; let PrimaryKey = ["Intr"]; @@ -1874,11 +1890,14 @@ def getImageDimIntrinsicByBaseOpcode : SearchIndex { let Key = ["BaseOpcode", "Dim"]; } -foreach intr = !listconcat(AMDGPUImageDimIntrinsics, - AMDGPUImageDimAtomicIntrinsics) in { +foreach intr = AMDGPUImageDimIntrinsics in { def : ImageDimIntrinsicInfo<intr>; } +foreach intr = AMDGPUImageDimAtomicIntrinsics in { + def : ImageDimAtomicIntrinsicInfo<intr>; +} + // L to LZ Optimization Mapping def : MIMGLZMapping<IMAGE_SAMPLE_L, IMAGE_SAMPLE_LZ>; def : MIMGLZMapping<IMAGE_SAMPLE_C_L, IMAGE_SAMPLE_C_LZ>; diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index be42291..b34ab2a 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -9134,16 +9134,23 @@ SDValue SITargetLowering::lowerImage(SDValue Op, SDLoc DL(Op); MachineFunction &MF = DAG.getMachineFunction(); const GCNSubtarget *ST = &MF.getSubtarget<GCNSubtarget>(); + unsigned IntrOpcode = Intr->BaseOpcode; + // For image atomic: use no-return opcode if result is unused. 
+ if (Intr->AtomicNoRetBaseOpcode != Intr->BaseOpcode && + !Op.getNode()->hasAnyUseOfValue(0)) + IntrOpcode = Intr->AtomicNoRetBaseOpcode; const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = - AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); + AMDGPU::getMIMGBaseOpcodeInfo(IntrOpcode); const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); - unsigned IntrOpcode = Intr->BaseOpcode; bool IsGFX10Plus = AMDGPU::isGFX10Plus(*Subtarget); bool IsGFX11Plus = AMDGPU::isGFX11Plus(*Subtarget); bool IsGFX12Plus = AMDGPU::isGFX12Plus(*Subtarget); SmallVector<EVT, 3> ResultTypes(Op->values()); SmallVector<EVT, 3> OrigResultTypes(Op->values()); + if (BaseOpcode->NoReturn && BaseOpcode->Atomic) + ResultTypes.erase(&ResultTypes[0]); + bool IsD16 = false; bool IsG16 = false; bool IsA16 = false; @@ -9162,8 +9169,10 @@ SDValue SITargetLowering::lowerImage(SDValue Op, VData = Op.getOperand(2); IsAtomicPacked16Bit = - (Intr->BaseOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_F16 || - Intr->BaseOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_BF16); + (IntrOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_F16 || + IntrOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_F16_NORTN || + IntrOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_BF16 || + IntrOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_BF16_NORTN); bool Is64Bit = VData.getValueSizeInBits() == 64; if (BaseOpcode->AtomicX2) { @@ -9173,7 +9182,9 @@ SDValue SITargetLowering::lowerImage(SDValue Op, if (Is64Bit) VData = DAG.getBitcast(MVT::v4i32, VData); - ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32; + if (!BaseOpcode->NoReturn) + ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32; + DMask = Is64Bit ? 0xf : 0x3; NumVDataDwords = Is64Bit ? 4 : 2; } else { @@ -9399,8 +9410,9 @@ SDValue SITargetLowering::lowerImage(SDValue Op, } unsigned CPol = Op.getConstantOperandVal(ArgOffset + Intr->CachePolicyIndex); - if (BaseOpcode->Atomic) - CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization + // Keep GLC only when the atomic's result is actually used. + if (BaseOpcode->Atomic && !BaseOpcode->NoReturn) + CPol |= AMDGPU::CPol::GLC; if (CPol & ~((IsGFX12Plus ? 
AMDGPU::CPol::ALL : AMDGPU::CPol::ALL_pregfx12) | AMDGPU::CPol::VOLATILE)) return Op; @@ -9512,13 +9524,20 @@ SDValue SITargetLowering::lowerImage(SDValue Op, DAG.setNodeMemRefs(NewNode, {MemRef}); } + if (BaseOpcode->NoReturn) { + if (BaseOpcode->Atomic) + return DAG.getMergeValues( + {DAG.getPOISON(OrigResultTypes[0]), SDValue(NewNode, 0)}, DL); + + return SDValue(NewNode, 0); + } + if (BaseOpcode->AtomicX2) { SmallVector<SDValue, 1> Elt; DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1); return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL); } - if (BaseOpcode->NoReturn) - return SDValue(NewNode, 0); + return constructRetValue(DAG, NewNode, OrigResultTypes, IsTexFail, Subtarget->hasUnpackedD16VMem(), IsD16, DMaskLanes, NumVDataDwords, IsAtomicPacked16Bit, DL); diff --git a/llvm/lib/Target/DirectX/DXIL.td b/llvm/lib/Target/DirectX/DXIL.td index 44c4830..7ae500a 100644 --- a/llvm/lib/Target/DirectX/DXIL.td +++ b/llvm/lib/Target/DirectX/DXIL.td @@ -1058,6 +1058,16 @@ def WaveActiveOp : DXILOp<119, waveActiveOp> { IntrinArgIndex<0>, IntrinArgI8<WaveOpKind_Max>, IntrinArgI8<SignedOpKind_Unsigned> ]>, + IntrinSelect<int_dx_wave_reduce_min, + [ + IntrinArgIndex<0>, IntrinArgI8<WaveOpKind_Min>, + IntrinArgI8<SignedOpKind_Signed> + ]>, + IntrinSelect<int_dx_wave_reduce_umin, + [ + IntrinArgIndex<0>, IntrinArgI8<WaveOpKind_Min>, + IntrinArgI8<SignedOpKind_Unsigned> + ]>, ]; let arguments = [OverloadTy, Int8Ty, Int8Ty]; diff --git a/llvm/lib/Target/DirectX/DXILShaderFlags.cpp b/llvm/lib/Target/DirectX/DXILShaderFlags.cpp index e7e7f2c..ce6e812 100644 --- a/llvm/lib/Target/DirectX/DXILShaderFlags.cpp +++ b/llvm/lib/Target/DirectX/DXILShaderFlags.cpp @@ -94,6 +94,8 @@ static bool checkWaveOps(Intrinsic::ID IID) { case Intrinsic::dx_wave_reduce_usum: case Intrinsic::dx_wave_reduce_max: case Intrinsic::dx_wave_reduce_umax: + case Intrinsic::dx_wave_reduce_min: + case Intrinsic::dx_wave_reduce_umin: return true; } } diff --git a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp index 68fd3e0..60dfd96 100644 --- a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp +++ b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp @@ -55,8 +55,10 @@ bool DirectXTTIImpl::isTargetIntrinsicTriviallyScalarizable( case Intrinsic::dx_splitdouble: case Intrinsic::dx_wave_readlane: case Intrinsic::dx_wave_reduce_max: + case Intrinsic::dx_wave_reduce_min: case Intrinsic::dx_wave_reduce_sum: case Intrinsic::dx_wave_reduce_umax: + case Intrinsic::dx_wave_reduce_umin: case Intrinsic::dx_wave_reduce_usum: case Intrinsic::dx_imad: case Intrinsic::dx_umad: diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp index 4029e14..729c077 100644 --- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp @@ -493,7 +493,7 @@ NVPTXTTIImpl::getInstructionCost(const User *U, // predicate ("@"). 
return !AsmInst.empty() &&
                    (AsmInst[0] == '@' || isAlpha(AsmInst[0]) ||
-                    AsmInst.find(".pragma") != StringRef::npos);
+                    AsmInst.contains(".pragma"));
       });
   return InstCount * TargetTransformInfo::TCC_Basic;
 }
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
index 41a9c92..96e8afc 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -823,6 +823,7 @@ static bool relaxableFixupNeedsRelocation(const MCFixupKind Kind) {
     break;
   case RISCV::fixup_riscv_rvc_jump:
   case RISCV::fixup_riscv_rvc_branch:
+  case RISCV::fixup_riscv_rvc_imm:
   case RISCV::fixup_riscv_jal:
     return false;
   }
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
index 6d587e6..5934c91 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
@@ -688,6 +688,7 @@ uint64_t RISCVMCCodeEmitter::getImmOpValue(const MCInst &MI, unsigned OpNo,
     // the `jal` again in the assembler.
   } else if (MIFrm == RISCVII::InstFormatCI) {
     FixupKind = RISCV::fixup_riscv_rvc_imm;
+    AsmRelaxToLinkerRelaxableWithFeature(RISCV::FeatureVendorXqcili);
   } else if (MIFrm == RISCVII::InstFormatI) {
     FixupKind = RISCV::fixup_riscv_12_i;
   } else if (MIFrm == RISCVII::InstFormatQC_EB) {
diff --git a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
index 98b636e..9bd66a4 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
@@ -373,6 +373,26 @@ static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
         .addReg(ScratchReg)
         .addImm(-1);
     break;
+  case AtomicRMWInst::Max:
+    BuildMI(LoopMBB, DL, TII->get(RISCV::MAX), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    break;
+  case AtomicRMWInst::Min:
+    BuildMI(LoopMBB, DL, TII->get(RISCV::MIN), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    break;
+  case AtomicRMWInst::UMax:
+    BuildMI(LoopMBB, DL, TII->get(RISCV::MAXU), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    break;
+  case AtomicRMWInst::UMin:
+    BuildMI(LoopMBB, DL, TII->get(RISCV::MINU), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    break;
   }
   BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)), ScratchReg)
       .addReg(ScratchReg)
@@ -682,6 +702,9 @@ bool RISCVExpandAtomicPseudo::expandAtomicMinMaxOp(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
     AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
     MachineBasicBlock::iterator &NextMBBI) {
+  // Using MIN(U)/MAX(U) is preferable if permitted
+  if (STI->hasPermissiveZalrsc() && STI->hasStdExtZbb() && !IsMasked)
+    return expandAtomicBinOp(MBB, MBBI, BinOp, IsMasked, Width, NextMBBI);
   MachineInstr &MI = *MBBI;
   DebugLoc DL = MI.getDebugLoc();
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 2754d78..b4556f6 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1906,6 +1906,25 @@ def FeatureForcedAtomics : SubtargetFeature<
 def HasAtomicLdSt
     : Predicate<"Subtarget->hasStdExtZalrsc() || Subtarget->hasForcedAtomics()">;

+// The RISC-V Unprivileged Architecture - ISA Volume 1 (Version: 20250508)
+// [https://docs.riscv.org/reference/isa/_attachments/riscv-unprivileged.pdf]
+// in section 13.3.
Eventual Success of Store-Conditional Instructions, defines
+// _constrained_ LR/SC loops:
+// The dynamic code executed between the LR and SC instructions can only
+// contain instructions from the base ''I'' instruction set, excluding loads,
+// stores, backward jumps, taken backward branches, JALR, FENCE, and SYSTEM
+// instructions. Compressed forms of the aforementioned ''I'' instructions in
+// the Zca and Zcb extensions are also permitted.
+// LR/SC loops that do not adhere to the above are _unconstrained_ LR/SC loops,
+// and success is implementation specific. For implementations which know that
+// non-base instructions (such as the ''B'' extension) will not violate any
+// forward progress guarantees, using these instructions to reduce the LR/SC
+// sequence length is desirable.
+def FeaturePermissiveZalrsc
+    : SubtargetFeature<
+          "permissive-zalrsc", "HasPermissiveZalrsc", "true",
+          "Implementation permits non-base instructions between LR/SC pairs">;
+
 def FeatureTaggedGlobals : SubtargetFeature<"tagged-globals", "AllowTaggedGlobals",
                                             "true",
                                             "Use an instruction sequence for taking the address of a global "
diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
index 6181abb..47022b3 100644
--- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
@@ -745,7 +745,7 @@ Register SPIRVGlobalRegistry::buildGlobalVariable(
       .addDef(ResVReg)
       .addUse(getSPIRVTypeID(BaseType))
       .addImm(static_cast<uint32_t>(Storage));
-  if (Init != 0)
+  if (Init)
     MIB.addUse(Init->getOperand(0).getReg());
   // ISel may introduce a new register on this step, so we need to add it to
   // DT and correct its type avoiding fails on the next stage.
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 021353a..3fea21e 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -222,6 +222,9 @@ private:
   bool selectWaveReduceMax(Register ResVReg, const SPIRVType *ResType,
                            MachineInstr &I, bool IsUnsigned) const;

+  bool selectWaveReduceMin(Register ResVReg, const SPIRVType *ResType,
+                           MachineInstr &I, bool IsUnsigned) const;
+
   bool selectWaveReduceSum(Register ResVReg, const SPIRVType *ResType,
                            MachineInstr &I) const;

@@ -2456,6 +2459,35 @@ bool SPIRVInstructionSelector::selectWaveReduceMax(Register ResVReg,
       .constrainAllUses(TII, TRI, RBI);
 }

+bool SPIRVInstructionSelector::selectWaveReduceMin(Register ResVReg,
+                                                   const SPIRVType *ResType,
+                                                   MachineInstr &I,
+                                                   bool IsUnsigned) const {
+  assert(I.getNumOperands() == 3);
+  assert(I.getOperand(2).isReg());
+  MachineBasicBlock &BB = *I.getParent();
+  Register InputRegister = I.getOperand(2).getReg();
+  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
+
+  if (!InputType)
+    report_fatal_error("Input Type could not be determined.");
+
+  SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
+  // Retrieve the operation to use based on input type
+  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
+  auto IntegerOpcodeType =
+      IsUnsigned ? SPIRV::OpGroupNonUniformUMin : SPIRV::OpGroupNonUniformSMin;
+  auto Opcode = IsFloatTy ?
SPIRV::OpGroupNonUniformFMin : IntegerOpcodeType; + return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII, + !STI.isShader())) + .addImm(SPIRV::GroupOperation::Reduce) + .addUse(I.getOperand(2).getReg()) + .constrainAllUses(TII, TRI, RBI); +} + bool SPIRVInstructionSelector::selectWaveReduceSum(Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const { @@ -3431,6 +3463,10 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg, return selectWaveReduceMax(ResVReg, ResType, I, /*IsUnsigned*/ true); case Intrinsic::spv_wave_reduce_max: return selectWaveReduceMax(ResVReg, ResType, I, /*IsUnsigned*/ false); + case Intrinsic::spv_wave_reduce_umin: + return selectWaveReduceMin(ResVReg, ResType, I, /*IsUnsigned*/ true); + case Intrinsic::spv_wave_reduce_min: + return selectWaveReduceMin(ResVReg, ResType, I, /*IsUnsigned*/ false); case Intrinsic::spv_wave_reduce_sum: return selectWaveReduceSum(ResVReg, ResType, I); case Intrinsic::spv_wave_readlane: diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp index 3da720f..58109ac 100644 --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -8973,8 +8973,7 @@ SystemZTargetLowering::getJumpConditionMergingParams(Instruction::BinaryOps Opc, if (const auto *CB = dyn_cast<CallBase>(RHSVal)) { if (CB->isInlineAsm()) { const InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand()); - return IA && - IA->getConstraintString().find("{@cc}") != std::string::npos; + return IA && IA->getConstraintString().contains("{@cc}"); } } } diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index b86020a..5785440 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -54679,7 +54679,8 @@ static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG, SDValue NewPtr = DAG.getMemBasePlusOffset( Ld->getBasePtr(), PtrByteOfs, DL, SDNodeFlags::NoUnsignedWrap); SDValue NewLoad = - DAG.getLoad(VT, DL, Ld->getChain(), NewPtr, Ld->getMemOperand()); + DAG.getLoad(VT, DL, Ld->getChain(), NewPtr, Ld->getPointerInfo(), + Align(), Ld->getMemOperand()->getFlags()); DAG.ReplaceAllUsesOfValueWith(Src.getOperand(0).getValue(1), NewLoad.getValue(1)); return NewLoad; diff --git a/llvm/lib/TargetParser/Host.cpp b/llvm/lib/TargetParser/Host.cpp index c8d1938..0849fc7 100644 --- a/llvm/lib/TargetParser/Host.cpp +++ b/llvm/lib/TargetParser/Host.cpp @@ -1179,7 +1179,7 @@ static const char *getAMDProcessorTypeAndSubtype(unsigned Family, const unsigned *Features, unsigned *Type, unsigned *Subtype) { - const char *CPU = 0; + const char *CPU = nullptr; switch (Family) { case 4: diff --git a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp index 042578d..6a11aec 100644 --- a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp +++ b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp @@ -380,7 +380,7 @@ bool ExpandVariadics::runOnModule(Module &M) { if (CB->isIndirectCall()) { FunctionType *FTy = CB->getFunctionType(); if (FTy->isVarArg()) - Changed |= expandCall(M, Builder, CB, FTy, 0); + Changed |= expandCall(M, Builder, CB, FTy, /*NF=*/nullptr); } } } diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp index c537be5c..4fac5d3 100644 --- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp +++ 
b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp @@ -5228,32 +5228,52 @@ bool SimplifyCFGOpt::simplifyBranchOnICmpChain(BranchInst *BI, CompVal, DL.getIntPtrType(CompVal->getType()), "magicptr"); } - // Create the new switch instruction now. - SwitchInst *New = Builder.CreateSwitch(CompVal, DefaultBB, Values.size()); - if (HasProfile) { - // We know the weight of the default case. We don't know the weight of the - // other cases, but rather than completely lose profiling info, we split - // the remaining probability equally over them. - SmallVector<uint32_t> NewWeights(Values.size() + 1); - NewWeights[0] = BranchWeights[1]; // this is the default, and we swapped if - // TrueWhenEqual. - for (auto &V : drop_begin(NewWeights)) - V = BranchWeights[0] / Values.size(); - setBranchWeights(*New, NewWeights, /*IsExpected=*/false); - } - - // Add all of the 'cases' to the switch instruction. - for (ConstantInt *Val : Values) - New->addCase(Val, EdgeBB); - - // We added edges from PI to the EdgeBB. As such, if there were any - // PHI nodes in EdgeBB, they need entries to be added corresponding to - // the number of edges added. - for (BasicBlock::iterator BBI = EdgeBB->begin(); isa<PHINode>(BBI); ++BBI) { - PHINode *PN = cast<PHINode>(BBI); - Value *InVal = PN->getIncomingValueForBlock(BB); - for (unsigned i = 0, e = Values.size() - 1; i != e; ++i) - PN->addIncoming(InVal, BB); + // Check if we can represent the values as a contiguous range. If so, we use a + // range check + conditional branch instead of a switch. + if (Values.front()->getValue() - Values.back()->getValue() == + Values.size() - 1) { + ConstantRange RangeToCheck = ConstantRange::getNonEmpty( + Values.back()->getValue(), Values.front()->getValue() + 1); + APInt Offset, RHS; + ICmpInst::Predicate Pred; + RangeToCheck.getEquivalentICmp(Pred, RHS, Offset); + Value *X = CompVal; + if (!Offset.isZero()) + X = Builder.CreateAdd(X, ConstantInt::get(CompVal->getType(), Offset)); + Value *Cond = + Builder.CreateICmp(Pred, X, ConstantInt::get(CompVal->getType(), RHS)); + BranchInst *NewBI = Builder.CreateCondBr(Cond, EdgeBB, DefaultBB); + if (HasProfile) + setBranchWeights(*NewBI, BranchWeights, /*IsExpected=*/false); + // We don't need to update PHI nodes since we don't add any new edges. + } else { + // Create the new switch instruction now. + SwitchInst *New = Builder.CreateSwitch(CompVal, DefaultBB, Values.size()); + if (HasProfile) { + // We know the weight of the default case. We don't know the weight of the + // other cases, but rather than completely lose profiling info, we split + // the remaining probability equally over them. + SmallVector<uint32_t> NewWeights(Values.size() + 1); + NewWeights[0] = BranchWeights[1]; // this is the default, and we swapped + // if TrueWhenEqual. + for (auto &V : drop_begin(NewWeights)) + V = BranchWeights[0] / Values.size(); + setBranchWeights(*New, NewWeights, /*IsExpected=*/false); + } + + // Add all of the 'cases' to the switch instruction. + for (ConstantInt *Val : Values) + New->addCase(Val, EdgeBB); + + // We added edges from PI to the EdgeBB. As such, if there were any + // PHI nodes in EdgeBB, they need entries to be added corresponding to + // the number of edges added. + for (BasicBlock::iterator BBI = EdgeBB->begin(); isa<PHINode>(BBI); ++BBI) { + PHINode *PN = cast<PHINode>(BBI); + Value *InVal = PN->getIncomingValueForBlock(BB); + for (unsigned i = 0, e = Values.size() - 1; i != e; ++i) + PN->addIncoming(InVal, BB); + } } // Erase the old branch instruction. 
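The SimplifyCFG hunk above relies on a small arithmetic fact: once the case values form a contiguous range [lo, hi], membership can be checked with one wrapping subtraction and one unsigned comparison, which is the shape ConstantRange::getEquivalentICmp produces. The following self-contained C++ sketch models the contiguity test (values sorted descending, mirroring the front()/back() usage above) and the resulting range check; the helper names are ours, not LLVM's.

// Standalone sketch of the contiguous-range rewrite: a switch whose cases
// form the contiguous range [lo, hi] is equivalent to the single unsigned
// check (x - lo) u< (hi - lo + 1).
#include <cstdint>
#include <iostream>
#include <vector>

// Values are sorted in descending order, so front() is hi and back() is lo.
bool isContiguous(const std::vector<uint32_t> &Values) {
  return Values.front() - Values.back() == Values.size() - 1;
}

bool inRange(uint32_t X, const std::vector<uint32_t> &Values) {
  // Wrapping subtraction lets one unsigned compare cover both bounds,
  // the same trick the generated icmp uses.
  return static_cast<uint32_t>(X - Values.back()) < Values.size();
}

int main() {
  std::vector<uint32_t> Cases{7, 6, 5, 4}; // contiguous range 4..7
  std::cout << isContiguous(Cases) << "\n"; // 1
  std::cout << inRange(5, Cases) << inRange(9, Cases) << "\n"; // 10
}

Because the subtraction wraps, values below lo become large unsigned numbers and fail the same compare as values above hi, so no second branch is needed.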
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 4fcaf6d..43166c0 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -5608,6 +5608,7 @@ private:
     for (ScheduleBundle *Bundle : Bundles) {
       if (ScheduleCopyableDataMap.empty() && TotalOpCount == 0)
         break;
+      SmallPtrSet<Value *, 4> ParentsUniqueUsers;
       // Need to search for the lane since the tree entry can be
       // reordered.
       auto *It = find(Bundle->getTreeEntry()->Scalars, In);
@@ -5636,6 +5637,22 @@ private:
               Bundle->getTreeEntry()->isCopyableElement(In)) &&
              "Missed TreeEntry operands?");

+      bool IsNonSchedulableWithParentPhiNode =
+          Bundle->getTreeEntry()->doesNotNeedToSchedule() &&
+          Bundle->getTreeEntry()->UserTreeIndex &&
+          Bundle->getTreeEntry()->UserTreeIndex.UserTE->hasState() &&
+          Bundle->getTreeEntry()->UserTreeIndex.UserTE->getOpcode() ==
+              Instruction::PHI;
+      // Count the unique phi nodes in the parent entry that use this scalar,
+      // and exit once all the unique phis have been processed.
+      if (IsNonSchedulableWithParentPhiNode) {
+        const TreeEntry *ParentTE =
+            Bundle->getTreeEntry()->UserTreeIndex.UserTE;
+        Value *User = ParentTE->Scalars[Lane];
+        if (!ParentsUniqueUsers.insert(User).second)
+          break;
+      }
+
       for (unsigned OpIdx :
            seq<unsigned>(Bundle->getTreeEntry()->getNumOperands()))
         if (auto *I = dyn_cast<Instruction>(
@@ -5644,8 +5661,8 @@ private:
                             << *I << "\n");
           DecrUnschedForInst(I, Bundle->getTreeEntry(), OpIdx, Checked);
         }
-      // If parent node is schedulable, it will be handle correctly.
-      if (!Bundle->getTreeEntry()->doesNotNeedToSchedule())
+      // If the parent node is schedulable, it will be handled correctly.
+      if (!IsNonSchedulableWithParentPhiNode)
         break;
       It = std::find(std::next(It),
                      Bundle->getTreeEntry()->Scalars.end(), In);
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index acad795..d9ac26bb 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -3648,6 +3648,37 @@ tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red,
     Sub = VecOp->getDefiningRecipe();
     VecOp = Tmp;
   }
+
+  // If ValB is a constant and can be safely extended, truncate it to the same
+  // type as ExtA's operand, then extend it to the same type as ExtA. This
+  // creates two uniform extends that can more easily be matched by the rest of
+  // the bundling code. The ExtB reference, ValB and operand 1 of Mul are all
+  // replaced with the new extend of the constant.
+  auto ExtendAndReplaceConstantOp = [&Ctx](VPWidenCastRecipe *ExtA,
+                                           VPWidenCastRecipe *&ExtB,
+                                           VPValue *&ValB, VPWidenRecipe *Mul) {
+    if (!ExtA || ExtB || !ValB->isLiveIn())
+      return;
+    Type *NarrowTy = Ctx.Types.inferScalarType(ExtA->getOperand(0));
+    Instruction::CastOps ExtOpc = ExtA->getOpcode();
+    const APInt *Const;
+    if (!match(ValB, m_APInt(Const)) ||
+        !llvm::canConstantBeExtended(
+            Const, NarrowTy, TTI::getPartialReductionExtendKind(ExtOpc)))
+      return;
+    // The truncate ensures that the type of each extended operand is the
+    // same, and it's been proven that the constant can be extended from
+    // NarrowTy safely. Necessary since ExtA's extended operand would be
+    // e.g. an i8, while the const will likely be an i32. This will be
+    // elided by later optimisations.
+ VPBuilder Builder(Mul); + auto *Trunc = + Builder.createWidenCast(Instruction::CastOps::Trunc, ValB, NarrowTy); + Type *WideTy = Ctx.Types.inferScalarType(ExtA); + ValB = ExtB = Builder.createWidenCast(ExtOpc, Trunc, WideTy); + Mul->setOperand(1, ExtB); + }; + // Try to match reduce.add(mul(...)). if (match(VecOp, m_Mul(m_VPValue(A), m_VPValue(B)))) { auto *RecipeA = @@ -3656,6 +3687,9 @@ tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red, dyn_cast_if_present<VPWidenCastRecipe>(B->getDefiningRecipe()); auto *Mul = cast<VPWidenRecipe>(VecOp->getDefiningRecipe()); + // Convert reduce.add(mul(ext, const)) to reduce.add(mul(ext, ext(const))) + ExtendAndReplaceConstantOp(RecipeA, RecipeB, B, Mul); + // Match reduce.add/sub(mul(ext, ext)). if (RecipeA && RecipeB && match(RecipeA, m_ZExtOrSExt(m_VPValue())) && match(RecipeB, m_ZExtOrSExt(m_VPValue())) && @@ -3665,7 +3699,6 @@ tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red, cast<VPWidenRecipe>(Sub), Red); return new VPExpressionRecipe(RecipeA, RecipeB, Mul, Red); } - // Match reduce.add(mul). // TODO: Add an expression type for this variant with a negated mul if (!Sub && IsMulAccValidAndClampRange(Mul, nullptr, nullptr, nullptr)) return new VPExpressionRecipe(Mul, Red); @@ -3674,18 +3707,26 @@ tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red, // variants. if (Sub) return nullptr; - // Match reduce.add(ext(mul(ext(A), ext(B)))). - // All extend recipes must have same opcode or A == B - // which can be transform to reduce.add(zext(mul(sext(A), sext(B)))). - if (match(VecOp, m_ZExtOrSExt(m_Mul(m_ZExtOrSExt(m_VPValue()), - m_ZExtOrSExt(m_VPValue()))))) { + + // Match reduce.add(ext(mul(A, B))). + if (match(VecOp, m_ZExtOrSExt(m_Mul(m_VPValue(A), m_VPValue(B))))) { auto *Ext = cast<VPWidenCastRecipe>(VecOp->getDefiningRecipe()); auto *Mul = cast<VPWidenRecipe>(Ext->getOperand(0)->getDefiningRecipe()); - auto *Ext0 = - cast<VPWidenCastRecipe>(Mul->getOperand(0)->getDefiningRecipe()); - auto *Ext1 = - cast<VPWidenCastRecipe>(Mul->getOperand(1)->getDefiningRecipe()); - if ((Ext->getOpcode() == Ext0->getOpcode() || Ext0 == Ext1) && + auto *Ext0 = dyn_cast_if_present<VPWidenCastRecipe>(A->getDefiningRecipe()); + auto *Ext1 = dyn_cast_if_present<VPWidenCastRecipe>(B->getDefiningRecipe()); + + // reduce.add(ext(mul(ext, const))) + // -> reduce.add(ext(mul(ext, ext(const)))) + ExtendAndReplaceConstantOp(Ext0, Ext1, B, Mul); + + // reduce.add(ext(mul(ext(A), ext(B)))) + // -> reduce.add(mul(wider_ext(A), wider_ext(B))) + // The inner extends must either have the same opcode as the outer extend or + // be the same, in which case the multiply can never result in a negative + // value and the outer extend can be folded away by doing wider + // extends for the operands of the mul. 
+ if (Ext0 && Ext1 && + (Ext->getOpcode() == Ext0->getOpcode() || Ext0 == Ext1) && Ext0->getOpcode() == Ext1->getOpcode() && IsMulAccValidAndClampRange(Mul, Ext0, Ext1, Ext) && Mul->hasOneUse()) { auto *NewExt0 = new VPWidenCastRecipe( diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/split-wide-shifts-multiway.ll b/llvm/test/CodeGen/AArch64/GlobalISel/split-wide-shifts-multiway.ll index 41f7ab8..480fcbd 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/split-wide-shifts-multiway.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/split-wide-shifts-multiway.ll @@ -4992,28 +4992,21 @@ define void @test_shl_i512_const_32(ptr %result, ptr %input) { ; GISEL-LABEL: test_shl_i512_const_32: ; GISEL: ; %bb.0: ; %entry ; GISEL-NEXT: ldp x8, x9, [x1] -; GISEL-NEXT: ldp x11, x12, [x1, #16] -; GISEL-NEXT: ldp x14, x15, [x1, #32] -; GISEL-NEXT: lsr x10, x8, #32 -; GISEL-NEXT: lsr x13, x9, #32 -; GISEL-NEXT: lsl x8, x8, #32 -; GISEL-NEXT: orr x9, x10, x9, lsl #32 -; GISEL-NEXT: lsr x10, x11, #32 -; GISEL-NEXT: orr x11, x13, x11, lsl #32 -; GISEL-NEXT: ldp x13, x16, [x1, #48] -; GISEL-NEXT: stp x8, x9, [x0] -; GISEL-NEXT: lsr x8, x12, #32 -; GISEL-NEXT: orr x10, x10, x12, lsl #32 -; GISEL-NEXT: lsr x12, x14, #32 -; GISEL-NEXT: lsr x9, x15, #32 -; GISEL-NEXT: orr x8, x8, x14, lsl #32 -; GISEL-NEXT: stp x11, x10, [x0, #16] -; GISEL-NEXT: orr x11, x12, x15, lsl #32 -; GISEL-NEXT: lsr x12, x13, #32 -; GISEL-NEXT: orr x9, x9, x13, lsl #32 -; GISEL-NEXT: stp x8, x11, [x0, #32] -; GISEL-NEXT: orr x8, x12, x16, lsl #32 -; GISEL-NEXT: stp x9, x8, [x0, #48] +; GISEL-NEXT: ldp x10, x11, [x1, #16] +; GISEL-NEXT: ldp x13, x14, [x1, #32] +; GISEL-NEXT: lsl x12, x8, #32 +; GISEL-NEXT: extr x8, x9, x8, #32 +; GISEL-NEXT: extr x9, x10, x9, #32 +; GISEL-NEXT: extr x10, x11, x10, #32 +; GISEL-NEXT: ldp x15, x16, [x1, #48] +; GISEL-NEXT: stp x12, x8, [x0] +; GISEL-NEXT: extr x8, x13, x11, #32 +; GISEL-NEXT: stp x9, x10, [x0, #16] +; GISEL-NEXT: extr x9, x14, x13, #32 +; GISEL-NEXT: extr x10, x15, x14, #32 +; GISEL-NEXT: stp x8, x9, [x0, #32] +; GISEL-NEXT: extr x8, x16, x15, #32 +; GISEL-NEXT: stp x10, x8, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5044,30 +5037,22 @@ define void @test_lshr_i512_const_32(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_lshr_i512_const_32: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #8] -; GISEL-NEXT: ldr x11, [x1] -; GISEL-NEXT: ldp x10, x14, [x1, #24] -; GISEL-NEXT: ldr x16, [x1, #56] -; GISEL-NEXT: lsl x12, x8, #32 -; GISEL-NEXT: lsl x13, x9, #32 -; GISEL-NEXT: lsl x15, x10, #32 -; GISEL-NEXT: orr x11, x12, x11, lsr #32 -; GISEL-NEXT: orr x8, x13, x8, lsr #32 -; GISEL-NEXT: lsl x13, x14, #32 -; GISEL-NEXT: orr x9, x15, x9, lsr #32 -; GISEL-NEXT: ldp x12, x15, [x1, #40] -; GISEL-NEXT: stp x11, x8, [x0] -; GISEL-NEXT: orr x10, x13, x10, lsr #32 -; GISEL-NEXT: lsl x8, x16, #32 -; GISEL-NEXT: lsl x11, x12, #32 -; GISEL-NEXT: lsl x13, x15, #32 -; GISEL-NEXT: stp x9, x10, [x0, #16] -; GISEL-NEXT: orr x8, x8, x15, lsr #32 -; GISEL-NEXT: lsr x10, x16, #32 -; GISEL-NEXT: orr x11, x11, x14, lsr #32 -; GISEL-NEXT: orr x9, x13, x12, lsr #32 -; GISEL-NEXT: stp x8, x10, [x0, #48] -; GISEL-NEXT: stp x11, x9, [x0, #32] +; GISEL-NEXT: ldp x8, x9, [x1] +; GISEL-NEXT: ldp x10, x11, [x1, #16] +; GISEL-NEXT: ldp x12, x13, [x1, #32] +; GISEL-NEXT: extr x8, x9, x8, #32 +; GISEL-NEXT: ldp x14, x15, [x1, #48] +; GISEL-NEXT: extr x9, x10, x9, #32 +; GISEL-NEXT: extr x10, x11, x10, #32 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: extr x8, x12, x11, #32 +; 
GISEL-NEXT: extr x9, x13, x12, #32 +; GISEL-NEXT: stp x10, x8, [x0, #16] +; GISEL-NEXT: extr x10, x14, x13, #32 +; GISEL-NEXT: extr x8, x15, x14, #32 +; GISEL-NEXT: stp x9, x10, [x0, #32] +; GISEL-NEXT: lsr x9, x15, #32 +; GISEL-NEXT: stp x8, x9, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5098,32 +5083,24 @@ define void @test_ashr_i512_const_32(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_ashr_i512_const_32: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #8] -; GISEL-NEXT: ldr x11, [x1] -; GISEL-NEXT: ldp x10, x13, [x1, #24] -; GISEL-NEXT: ldr x17, [x1, #56] -; GISEL-NEXT: lsl x12, x8, #32 -; GISEL-NEXT: lsl x15, x9, #32 -; GISEL-NEXT: lsl x16, x10, #32 -; GISEL-NEXT: orr x11, x12, x11, lsr #32 -; GISEL-NEXT: ldp x14, x12, [x1, #40] -; GISEL-NEXT: orr x8, x15, x8, lsr #32 -; GISEL-NEXT: lsl x15, x13, #32 -; GISEL-NEXT: orr x9, x16, x9, lsr #32 -; GISEL-NEXT: asr x16, x17, #63 -; GISEL-NEXT: stp x11, x8, [x0] -; GISEL-NEXT: lsl x11, x14, #32 -; GISEL-NEXT: orr x10, x15, x10, lsr #32 -; GISEL-NEXT: lsl x15, x12, #32 -; GISEL-NEXT: orr x8, x11, x13, lsr #32 -; GISEL-NEXT: lsl x11, x17, #32 -; GISEL-NEXT: stp x9, x10, [x0, #16] -; GISEL-NEXT: orr x9, x15, x14, lsr #32 -; GISEL-NEXT: lsl x13, x16, #32 -; GISEL-NEXT: orr x10, x11, x12, lsr #32 -; GISEL-NEXT: stp x8, x9, [x0, #32] -; GISEL-NEXT: orr x8, x13, x17, asr #32 -; GISEL-NEXT: stp x10, x8, [x0, #48] +; GISEL-NEXT: ldp x8, x9, [x1] +; GISEL-NEXT: ldp x10, x11, [x1, #16] +; GISEL-NEXT: ldp x12, x13, [x1, #48] +; GISEL-NEXT: extr x8, x9, x8, #32 +; GISEL-NEXT: ldp x14, x15, [x1, #32] +; GISEL-NEXT: extr x9, x10, x9, #32 +; GISEL-NEXT: extr x10, x11, x10, #32 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: asr x8, x13, #63 +; GISEL-NEXT: extr x11, x14, x11, #32 +; GISEL-NEXT: extr x9, x15, x14, #32 +; GISEL-NEXT: lsl x8, x8, #32 +; GISEL-NEXT: stp x10, x11, [x0, #16] +; GISEL-NEXT: extr x10, x12, x15, #32 +; GISEL-NEXT: extr x11, x13, x12, #32 +; GISEL-NEXT: orr x8, x8, x13, asr #32 +; GISEL-NEXT: stp x9, x10, [x0, #32] +; GISEL-NEXT: stp x11, x8, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5252,23 +5229,17 @@ define void @test_shl_i512_const_96(ptr %result, ptr %input) { ; GISEL-NEXT: ldr x15, [x1, #48] ; GISEL-NEXT: ldp x10, x11, [x1, #16] ; GISEL-NEXT: ldp x12, x13, [x1, #32] -; GISEL-NEXT: lsr x14, x8, #32 -; GISEL-NEXT: lsr x16, x9, #32 -; GISEL-NEXT: lsl x8, x8, #32 -; GISEL-NEXT: orr x9, x14, x9, lsl #32 -; GISEL-NEXT: lsr x14, x10, #32 -; GISEL-NEXT: orr x10, x16, x10, lsl #32 -; GISEL-NEXT: stp xzr, x8, [x0] -; GISEL-NEXT: lsr x8, x11, #32 -; GISEL-NEXT: orr x11, x14, x11, lsl #32 -; GISEL-NEXT: lsr x14, x12, #32 -; GISEL-NEXT: stp x9, x10, [x0, #16] -; GISEL-NEXT: lsr x9, x13, #32 -; GISEL-NEXT: orr x8, x8, x12, lsl #32 -; GISEL-NEXT: orr x10, x14, x13, lsl #32 -; GISEL-NEXT: orr x9, x9, x15, lsl #32 -; GISEL-NEXT: stp x11, x8, [x0, #32] -; GISEL-NEXT: stp x10, x9, [x0, #48] +; GISEL-NEXT: lsl x14, x8, #32 +; GISEL-NEXT: extr x8, x9, x8, #32 +; GISEL-NEXT: extr x9, x10, x9, #32 +; GISEL-NEXT: extr x10, x11, x10, #32 +; GISEL-NEXT: stp xzr, x14, [x0] +; GISEL-NEXT: stp x8, x9, [x0, #16] +; GISEL-NEXT: extr x8, x12, x11, #32 +; GISEL-NEXT: extr x9, x13, x12, #32 +; GISEL-NEXT: stp x10, x8, [x0, #32] +; GISEL-NEXT: extr x10, x15, x13, #32 +; GISEL-NEXT: stp x9, x10, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5297,27 +5268,21 @@ define void @test_lshr_i512_const_96(ptr %result, ptr %input) { ; ; 
GISEL-LABEL: test_lshr_i512_const_96: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #16] -; GISEL-NEXT: ldr x10, [x1, #8] -; GISEL-NEXT: ldp x11, x14, [x1, #32] -; GISEL-NEXT: ldp x15, x16, [x1, #48] -; GISEL-NEXT: lsl x12, x8, #32 -; GISEL-NEXT: lsl x13, x9, #32 -; GISEL-NEXT: orr x10, x12, x10, lsr #32 -; GISEL-NEXT: lsl x12, x11, #32 -; GISEL-NEXT: orr x8, x13, x8, lsr #32 -; GISEL-NEXT: lsl x13, x14, #32 -; GISEL-NEXT: orr x9, x12, x9, lsr #32 -; GISEL-NEXT: stp x10, x8, [x0] -; GISEL-NEXT: lsl x10, x15, #32 -; GISEL-NEXT: orr x11, x13, x11, lsr #32 -; GISEL-NEXT: lsl x12, x16, #32 -; GISEL-NEXT: orr x8, x10, x14, lsr #32 -; GISEL-NEXT: lsr x10, x16, #32 -; GISEL-NEXT: stp x9, x11, [x0, #16] -; GISEL-NEXT: orr x9, x12, x15, lsr #32 -; GISEL-NEXT: stp x10, xzr, [x0, #48] -; GISEL-NEXT: stp x8, x9, [x0, #32] +; GISEL-NEXT: ldp x8, x9, [x1, #8] +; GISEL-NEXT: ldr x14, [x1, #56] +; GISEL-NEXT: ldp x10, x11, [x1, #24] +; GISEL-NEXT: ldp x12, x13, [x1, #40] +; GISEL-NEXT: extr x8, x9, x8, #32 +; GISEL-NEXT: extr x9, x10, x9, #32 +; GISEL-NEXT: extr x10, x11, x10, #32 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: extr x8, x12, x11, #32 +; GISEL-NEXT: extr x9, x13, x12, #32 +; GISEL-NEXT: stp x10, x8, [x0, #16] +; GISEL-NEXT: extr x10, x14, x13, #32 +; GISEL-NEXT: lsr x8, x14, #32 +; GISEL-NEXT: stp x9, x10, [x0, #32] +; GISEL-NEXT: stp x8, xzr, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5347,29 +5312,23 @@ define void @test_ashr_i512_const_96(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_ashr_i512_const_96: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #16] -; GISEL-NEXT: ldr x11, [x1, #8] -; GISEL-NEXT: ldp x10, x13, [x1, #32] -; GISEL-NEXT: lsl x12, x8, #32 -; GISEL-NEXT: lsl x14, x9, #32 -; GISEL-NEXT: lsl x15, x10, #32 -; GISEL-NEXT: orr x11, x12, x11, lsr #32 -; GISEL-NEXT: ldp x12, x16, [x1, #48] -; GISEL-NEXT: orr x8, x14, x8, lsr #32 -; GISEL-NEXT: lsl x14, x13, #32 -; GISEL-NEXT: orr x9, x15, x9, lsr #32 -; GISEL-NEXT: asr x15, x16, #63 -; GISEL-NEXT: stp x11, x8, [x0] -; GISEL-NEXT: lsl x11, x12, #32 -; GISEL-NEXT: orr x10, x14, x10, lsr #32 -; GISEL-NEXT: lsl x14, x16, #32 -; GISEL-NEXT: orr x8, x11, x13, lsr #32 +; GISEL-NEXT: ldp x8, x9, [x1, #8] +; GISEL-NEXT: ldr x13, [x1, #40] +; GISEL-NEXT: ldp x10, x11, [x1, #24] +; GISEL-NEXT: ldp x14, x12, [x1, #48] +; GISEL-NEXT: extr x8, x9, x8, #32 +; GISEL-NEXT: extr x9, x10, x9, #32 +; GISEL-NEXT: extr x10, x11, x10, #32 +; GISEL-NEXT: asr x15, x12, #63 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: extr x8, x13, x11, #32 +; GISEL-NEXT: extr x9, x14, x13, #32 ; GISEL-NEXT: lsl x11, x15, #32 -; GISEL-NEXT: stp x9, x10, [x0, #16] -; GISEL-NEXT: orr x9, x14, x12, lsr #32 -; GISEL-NEXT: orr x10, x11, x16, asr #32 -; GISEL-NEXT: stp x8, x9, [x0, #32] -; GISEL-NEXT: stp x10, x15, [x0, #48] +; GISEL-NEXT: stp x10, x8, [x0, #16] +; GISEL-NEXT: extr x10, x12, x14, #32 +; GISEL-NEXT: orr x8, x11, x12, asr #32 +; GISEL-NEXT: stp x9, x10, [x0, #32] +; GISEL-NEXT: stp x8, x15, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5404,28 +5363,21 @@ define void @test_shl_i512_const_1(ptr %result, ptr %input) { ; GISEL-LABEL: test_shl_i512_const_1: ; GISEL: ; %bb.0: ; %entry ; GISEL-NEXT: ldp x8, x9, [x1] -; GISEL-NEXT: ldp x11, x12, [x1, #16] -; GISEL-NEXT: ldp x14, x15, [x1, #32] -; GISEL-NEXT: lsr x10, x8, #63 -; GISEL-NEXT: lsr x13, x9, #63 -; GISEL-NEXT: lsl x8, x8, #1 -; GISEL-NEXT: orr x9, x10, x9, lsl #1 -; GISEL-NEXT: lsr x10, x11, #63 -; 
GISEL-NEXT: orr x11, x13, x11, lsl #1 -; GISEL-NEXT: ldp x13, x16, [x1, #48] -; GISEL-NEXT: stp x8, x9, [x0] -; GISEL-NEXT: lsr x8, x12, #63 -; GISEL-NEXT: orr x10, x10, x12, lsl #1 -; GISEL-NEXT: lsr x12, x14, #63 -; GISEL-NEXT: lsr x9, x15, #63 -; GISEL-NEXT: orr x8, x8, x14, lsl #1 -; GISEL-NEXT: stp x11, x10, [x0, #16] -; GISEL-NEXT: orr x11, x12, x15, lsl #1 -; GISEL-NEXT: lsr x12, x13, #63 -; GISEL-NEXT: orr x9, x9, x13, lsl #1 -; GISEL-NEXT: stp x8, x11, [x0, #32] -; GISEL-NEXT: orr x8, x12, x16, lsl #1 -; GISEL-NEXT: stp x9, x8, [x0, #48] +; GISEL-NEXT: ldp x10, x11, [x1, #16] +; GISEL-NEXT: ldp x13, x14, [x1, #32] +; GISEL-NEXT: lsl x12, x8, #1 +; GISEL-NEXT: extr x8, x9, x8, #63 +; GISEL-NEXT: extr x9, x10, x9, #63 +; GISEL-NEXT: extr x10, x11, x10, #63 +; GISEL-NEXT: ldp x15, x16, [x1, #48] +; GISEL-NEXT: stp x12, x8, [x0] +; GISEL-NEXT: extr x8, x13, x11, #63 +; GISEL-NEXT: stp x9, x10, [x0, #16] +; GISEL-NEXT: extr x9, x14, x13, #63 +; GISEL-NEXT: extr x10, x15, x14, #63 +; GISEL-NEXT: stp x8, x9, [x0, #32] +; GISEL-NEXT: extr x8, x16, x15, #63 +; GISEL-NEXT: stp x10, x8, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5457,30 +5409,22 @@ define void @test_lshr_i512_const_1(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_lshr_i512_const_1: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #8] -; GISEL-NEXT: ldr x11, [x1] -; GISEL-NEXT: ldp x10, x14, [x1, #24] -; GISEL-NEXT: ldr x16, [x1, #56] -; GISEL-NEXT: lsl x12, x8, #63 -; GISEL-NEXT: lsl x13, x9, #63 -; GISEL-NEXT: lsl x15, x10, #63 -; GISEL-NEXT: orr x11, x12, x11, lsr #1 -; GISEL-NEXT: orr x8, x13, x8, lsr #1 -; GISEL-NEXT: lsl x13, x14, #63 -; GISEL-NEXT: orr x9, x15, x9, lsr #1 -; GISEL-NEXT: ldp x12, x15, [x1, #40] -; GISEL-NEXT: stp x11, x8, [x0] -; GISEL-NEXT: orr x10, x13, x10, lsr #1 -; GISEL-NEXT: lsl x8, x16, #63 -; GISEL-NEXT: lsl x11, x12, #63 -; GISEL-NEXT: lsl x13, x15, #63 -; GISEL-NEXT: stp x9, x10, [x0, #16] -; GISEL-NEXT: orr x8, x8, x15, lsr #1 -; GISEL-NEXT: lsr x10, x16, #1 -; GISEL-NEXT: orr x11, x11, x14, lsr #1 -; GISEL-NEXT: orr x9, x13, x12, lsr #1 -; GISEL-NEXT: stp x8, x10, [x0, #48] -; GISEL-NEXT: stp x11, x9, [x0, #32] +; GISEL-NEXT: ldp x8, x9, [x1] +; GISEL-NEXT: ldp x10, x11, [x1, #16] +; GISEL-NEXT: ldp x12, x13, [x1, #32] +; GISEL-NEXT: extr x8, x9, x8, #1 +; GISEL-NEXT: ldp x14, x15, [x1, #48] +; GISEL-NEXT: extr x9, x10, x9, #1 +; GISEL-NEXT: extr x10, x11, x10, #1 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: extr x8, x12, x11, #1 +; GISEL-NEXT: extr x9, x13, x12, #1 +; GISEL-NEXT: stp x10, x8, [x0, #16] +; GISEL-NEXT: extr x10, x14, x13, #1 +; GISEL-NEXT: extr x8, x15, x14, #1 +; GISEL-NEXT: stp x9, x10, [x0, #32] +; GISEL-NEXT: lsr x9, x15, #1 +; GISEL-NEXT: stp x8, x9, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5512,32 +5456,24 @@ define void @test_ashr_i512_const_1(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_ashr_i512_const_1: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #8] -; GISEL-NEXT: ldr x11, [x1] -; GISEL-NEXT: ldp x10, x13, [x1, #24] -; GISEL-NEXT: ldr x17, [x1, #56] -; GISEL-NEXT: lsl x12, x8, #63 -; GISEL-NEXT: lsl x15, x9, #63 -; GISEL-NEXT: lsl x16, x10, #63 -; GISEL-NEXT: orr x11, x12, x11, lsr #1 -; GISEL-NEXT: ldp x14, x12, [x1, #40] -; GISEL-NEXT: orr x8, x15, x8, lsr #1 -; GISEL-NEXT: lsl x15, x13, #63 -; GISEL-NEXT: orr x9, x16, x9, lsr #1 -; GISEL-NEXT: asr x16, x17, #63 -; GISEL-NEXT: stp x11, x8, [x0] -; GISEL-NEXT: lsl x11, x14, #63 -; GISEL-NEXT: orr 
x10, x15, x10, lsr #1 -; GISEL-NEXT: lsl x15, x12, #63 -; GISEL-NEXT: orr x8, x11, x13, lsr #1 -; GISEL-NEXT: lsl x11, x17, #63 -; GISEL-NEXT: stp x9, x10, [x0, #16] -; GISEL-NEXT: orr x9, x15, x14, lsr #1 -; GISEL-NEXT: lsl x13, x16, #63 -; GISEL-NEXT: orr x10, x11, x12, lsr #1 -; GISEL-NEXT: stp x8, x9, [x0, #32] -; GISEL-NEXT: orr x8, x13, x17, asr #1 -; GISEL-NEXT: stp x10, x8, [x0, #48] +; GISEL-NEXT: ldp x8, x9, [x1] +; GISEL-NEXT: ldp x10, x11, [x1, #16] +; GISEL-NEXT: ldp x12, x13, [x1, #48] +; GISEL-NEXT: extr x8, x9, x8, #1 +; GISEL-NEXT: ldp x14, x15, [x1, #32] +; GISEL-NEXT: extr x9, x10, x9, #1 +; GISEL-NEXT: extr x10, x11, x10, #1 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: asr x8, x13, #63 +; GISEL-NEXT: extr x11, x14, x11, #1 +; GISEL-NEXT: extr x9, x15, x14, #1 +; GISEL-NEXT: lsl x8, x8, #63 +; GISEL-NEXT: stp x10, x11, [x0, #16] +; GISEL-NEXT: extr x10, x12, x15, #1 +; GISEL-NEXT: extr x11, x13, x12, #1 +; GISEL-NEXT: orr x8, x8, x13, asr #1 +; GISEL-NEXT: stp x9, x10, [x0, #32] +; GISEL-NEXT: stp x11, x8, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5571,28 +5507,21 @@ define void @test_shl_i512_const_15(ptr %result, ptr %input) { ; GISEL-LABEL: test_shl_i512_const_15: ; GISEL: ; %bb.0: ; %entry ; GISEL-NEXT: ldp x8, x9, [x1] -; GISEL-NEXT: ldp x11, x12, [x1, #16] -; GISEL-NEXT: ldp x14, x15, [x1, #32] -; GISEL-NEXT: lsr x10, x8, #49 -; GISEL-NEXT: lsr x13, x9, #49 -; GISEL-NEXT: lsl x8, x8, #15 -; GISEL-NEXT: orr x9, x10, x9, lsl #15 -; GISEL-NEXT: lsr x10, x11, #49 -; GISEL-NEXT: orr x11, x13, x11, lsl #15 -; GISEL-NEXT: ldp x13, x16, [x1, #48] -; GISEL-NEXT: stp x8, x9, [x0] -; GISEL-NEXT: lsr x8, x12, #49 -; GISEL-NEXT: orr x10, x10, x12, lsl #15 -; GISEL-NEXT: lsr x12, x14, #49 -; GISEL-NEXT: lsr x9, x15, #49 -; GISEL-NEXT: orr x8, x8, x14, lsl #15 -; GISEL-NEXT: stp x11, x10, [x0, #16] -; GISEL-NEXT: orr x11, x12, x15, lsl #15 -; GISEL-NEXT: lsr x12, x13, #49 -; GISEL-NEXT: orr x9, x9, x13, lsl #15 -; GISEL-NEXT: stp x8, x11, [x0, #32] -; GISEL-NEXT: orr x8, x12, x16, lsl #15 -; GISEL-NEXT: stp x9, x8, [x0, #48] +; GISEL-NEXT: ldp x10, x11, [x1, #16] +; GISEL-NEXT: ldp x13, x14, [x1, #32] +; GISEL-NEXT: lsl x12, x8, #15 +; GISEL-NEXT: extr x8, x9, x8, #49 +; GISEL-NEXT: extr x9, x10, x9, #49 +; GISEL-NEXT: extr x10, x11, x10, #49 +; GISEL-NEXT: ldp x15, x16, [x1, #48] +; GISEL-NEXT: stp x12, x8, [x0] +; GISEL-NEXT: extr x8, x13, x11, #49 +; GISEL-NEXT: stp x9, x10, [x0, #16] +; GISEL-NEXT: extr x9, x14, x13, #49 +; GISEL-NEXT: extr x10, x15, x14, #49 +; GISEL-NEXT: stp x8, x9, [x0, #32] +; GISEL-NEXT: extr x8, x16, x15, #49 +; GISEL-NEXT: stp x10, x8, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5624,30 +5553,22 @@ define void @test_lshr_i512_const_15(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_lshr_i512_const_15: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #8] -; GISEL-NEXT: ldr x11, [x1] -; GISEL-NEXT: ldp x10, x14, [x1, #24] -; GISEL-NEXT: ldr x16, [x1, #56] -; GISEL-NEXT: lsl x12, x8, #49 -; GISEL-NEXT: lsl x13, x9, #49 -; GISEL-NEXT: lsl x15, x10, #49 -; GISEL-NEXT: orr x11, x12, x11, lsr #15 -; GISEL-NEXT: orr x8, x13, x8, lsr #15 -; GISEL-NEXT: lsl x13, x14, #49 -; GISEL-NEXT: orr x9, x15, x9, lsr #15 -; GISEL-NEXT: ldp x12, x15, [x1, #40] -; GISEL-NEXT: stp x11, x8, [x0] -; GISEL-NEXT: orr x10, x13, x10, lsr #15 -; GISEL-NEXT: lsl x8, x16, #49 -; GISEL-NEXT: lsl x11, x12, #49 -; GISEL-NEXT: lsl x13, x15, #49 -; GISEL-NEXT: stp x9, x10, [x0, #16] -; 
GISEL-NEXT: orr x8, x8, x15, lsr #15 -; GISEL-NEXT: lsr x10, x16, #15 -; GISEL-NEXT: orr x11, x11, x14, lsr #15 -; GISEL-NEXT: orr x9, x13, x12, lsr #15 -; GISEL-NEXT: stp x8, x10, [x0, #48] -; GISEL-NEXT: stp x11, x9, [x0, #32] +; GISEL-NEXT: ldp x8, x9, [x1] +; GISEL-NEXT: ldp x10, x11, [x1, #16] +; GISEL-NEXT: ldp x12, x13, [x1, #32] +; GISEL-NEXT: extr x8, x9, x8, #15 +; GISEL-NEXT: ldp x14, x15, [x1, #48] +; GISEL-NEXT: extr x9, x10, x9, #15 +; GISEL-NEXT: extr x10, x11, x10, #15 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: extr x8, x12, x11, #15 +; GISEL-NEXT: extr x9, x13, x12, #15 +; GISEL-NEXT: stp x10, x8, [x0, #16] +; GISEL-NEXT: extr x10, x14, x13, #15 +; GISEL-NEXT: extr x8, x15, x14, #15 +; GISEL-NEXT: stp x9, x10, [x0, #32] +; GISEL-NEXT: lsr x9, x15, #15 +; GISEL-NEXT: stp x8, x9, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5679,32 +5600,24 @@ define void @test_ashr_i512_const_15(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_ashr_i512_const_15: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #8] -; GISEL-NEXT: ldr x11, [x1] -; GISEL-NEXT: ldp x10, x13, [x1, #24] -; GISEL-NEXT: ldr x17, [x1, #56] -; GISEL-NEXT: lsl x12, x8, #49 -; GISEL-NEXT: lsl x15, x9, #49 -; GISEL-NEXT: lsl x16, x10, #49 -; GISEL-NEXT: orr x11, x12, x11, lsr #15 -; GISEL-NEXT: ldp x14, x12, [x1, #40] -; GISEL-NEXT: orr x8, x15, x8, lsr #15 -; GISEL-NEXT: lsl x15, x13, #49 -; GISEL-NEXT: orr x9, x16, x9, lsr #15 -; GISEL-NEXT: asr x16, x17, #63 -; GISEL-NEXT: stp x11, x8, [x0] -; GISEL-NEXT: lsl x11, x14, #49 -; GISEL-NEXT: orr x10, x15, x10, lsr #15 -; GISEL-NEXT: lsl x15, x12, #49 -; GISEL-NEXT: orr x8, x11, x13, lsr #15 -; GISEL-NEXT: lsl x11, x17, #49 -; GISEL-NEXT: stp x9, x10, [x0, #16] -; GISEL-NEXT: orr x9, x15, x14, lsr #15 -; GISEL-NEXT: lsl x13, x16, #49 -; GISEL-NEXT: orr x10, x11, x12, lsr #15 -; GISEL-NEXT: stp x8, x9, [x0, #32] -; GISEL-NEXT: orr x8, x13, x17, asr #15 -; GISEL-NEXT: stp x10, x8, [x0, #48] +; GISEL-NEXT: ldp x8, x9, [x1] +; GISEL-NEXT: ldp x10, x11, [x1, #16] +; GISEL-NEXT: ldp x12, x13, [x1, #48] +; GISEL-NEXT: extr x8, x9, x8, #15 +; GISEL-NEXT: ldp x14, x15, [x1, #32] +; GISEL-NEXT: extr x9, x10, x9, #15 +; GISEL-NEXT: extr x10, x11, x10, #15 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: asr x8, x13, #63 +; GISEL-NEXT: extr x11, x14, x11, #15 +; GISEL-NEXT: extr x9, x15, x14, #15 +; GISEL-NEXT: lsl x8, x8, #49 +; GISEL-NEXT: stp x10, x11, [x0, #16] +; GISEL-NEXT: extr x10, x12, x15, #15 +; GISEL-NEXT: extr x11, x13, x12, #15 +; GISEL-NEXT: orr x8, x8, x13, asr #15 +; GISEL-NEXT: stp x9, x10, [x0, #32] +; GISEL-NEXT: stp x11, x8, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5738,28 +5651,21 @@ define void @test_shl_i512_const_63(ptr %result, ptr %input) { ; GISEL-LABEL: test_shl_i512_const_63: ; GISEL: ; %bb.0: ; %entry ; GISEL-NEXT: ldp x8, x9, [x1] -; GISEL-NEXT: ldp x11, x12, [x1, #16] -; GISEL-NEXT: ldp x14, x15, [x1, #32] -; GISEL-NEXT: lsr x10, x8, #1 -; GISEL-NEXT: lsr x13, x9, #1 -; GISEL-NEXT: lsl x8, x8, #63 -; GISEL-NEXT: orr x9, x10, x9, lsl #63 -; GISEL-NEXT: lsr x10, x11, #1 -; GISEL-NEXT: orr x11, x13, x11, lsl #63 -; GISEL-NEXT: ldp x13, x16, [x1, #48] -; GISEL-NEXT: stp x8, x9, [x0] -; GISEL-NEXT: lsr x8, x12, #1 -; GISEL-NEXT: orr x10, x10, x12, lsl #63 -; GISEL-NEXT: lsr x12, x14, #1 -; GISEL-NEXT: lsr x9, x15, #1 -; GISEL-NEXT: orr x8, x8, x14, lsl #63 -; GISEL-NEXT: stp x11, x10, [x0, #16] -; GISEL-NEXT: orr x11, x12, x15, lsl #63 -; GISEL-NEXT: lsr x12, 
x13, #1 -; GISEL-NEXT: orr x9, x9, x13, lsl #63 -; GISEL-NEXT: stp x8, x11, [x0, #32] -; GISEL-NEXT: orr x8, x12, x16, lsl #63 -; GISEL-NEXT: stp x9, x8, [x0, #48] +; GISEL-NEXT: ldp x10, x11, [x1, #16] +; GISEL-NEXT: ldp x13, x14, [x1, #32] +; GISEL-NEXT: lsl x12, x8, #63 +; GISEL-NEXT: extr x8, x9, x8, #1 +; GISEL-NEXT: extr x9, x10, x9, #1 +; GISEL-NEXT: extr x10, x11, x10, #1 +; GISEL-NEXT: ldp x15, x16, [x1, #48] +; GISEL-NEXT: stp x12, x8, [x0] +; GISEL-NEXT: extr x8, x13, x11, #1 +; GISEL-NEXT: stp x9, x10, [x0, #16] +; GISEL-NEXT: extr x9, x14, x13, #1 +; GISEL-NEXT: extr x10, x15, x14, #1 +; GISEL-NEXT: stp x8, x9, [x0, #32] +; GISEL-NEXT: extr x8, x16, x15, #1 +; GISEL-NEXT: stp x10, x8, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5791,30 +5697,22 @@ define void @test_lshr_i512_const_63(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_lshr_i512_const_63: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #8] -; GISEL-NEXT: ldr x11, [x1] -; GISEL-NEXT: ldp x10, x14, [x1, #24] -; GISEL-NEXT: ldr x16, [x1, #56] -; GISEL-NEXT: lsl x12, x8, #1 -; GISEL-NEXT: lsl x13, x9, #1 -; GISEL-NEXT: lsl x15, x10, #1 -; GISEL-NEXT: orr x11, x12, x11, lsr #63 -; GISEL-NEXT: orr x8, x13, x8, lsr #63 -; GISEL-NEXT: lsl x13, x14, #1 -; GISEL-NEXT: orr x9, x15, x9, lsr #63 -; GISEL-NEXT: ldp x12, x15, [x1, #40] -; GISEL-NEXT: stp x11, x8, [x0] -; GISEL-NEXT: orr x10, x13, x10, lsr #63 -; GISEL-NEXT: lsl x8, x16, #1 -; GISEL-NEXT: lsl x11, x12, #1 -; GISEL-NEXT: lsl x13, x15, #1 -; GISEL-NEXT: stp x9, x10, [x0, #16] -; GISEL-NEXT: orr x8, x8, x15, lsr #63 -; GISEL-NEXT: lsr x10, x16, #63 -; GISEL-NEXT: orr x11, x11, x14, lsr #63 -; GISEL-NEXT: orr x9, x13, x12, lsr #63 -; GISEL-NEXT: stp x8, x10, [x0, #48] -; GISEL-NEXT: stp x11, x9, [x0, #32] +; GISEL-NEXT: ldp x8, x9, [x1] +; GISEL-NEXT: ldp x10, x11, [x1, #16] +; GISEL-NEXT: ldp x12, x13, [x1, #32] +; GISEL-NEXT: extr x8, x9, x8, #63 +; GISEL-NEXT: ldp x14, x15, [x1, #48] +; GISEL-NEXT: extr x9, x10, x9, #63 +; GISEL-NEXT: extr x10, x11, x10, #63 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: extr x8, x12, x11, #63 +; GISEL-NEXT: extr x9, x13, x12, #63 +; GISEL-NEXT: stp x10, x8, [x0, #16] +; GISEL-NEXT: extr x10, x14, x13, #63 +; GISEL-NEXT: extr x8, x15, x14, #63 +; GISEL-NEXT: stp x9, x10, [x0, #32] +; GISEL-NEXT: lsr x9, x15, #63 +; GISEL-NEXT: stp x8, x9, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5846,30 +5744,22 @@ define void @test_ashr_i512_const_63(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_ashr_i512_const_63: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #8] -; GISEL-NEXT: ldr x10, [x1] -; GISEL-NEXT: ldp x11, x13, [x1, #24] -; GISEL-NEXT: ldr x17, [x1, #56] -; GISEL-NEXT: lsl x15, x9, #1 -; GISEL-NEXT: lsl x12, x8, #1 -; GISEL-NEXT: lsl x16, x11, #1 -; GISEL-NEXT: orr x8, x15, x8, lsr #63 -; GISEL-NEXT: lsl x15, x13, #1 -; GISEL-NEXT: orr x10, x12, x10, lsr #63 -; GISEL-NEXT: ldp x14, x12, [x1, #40] -; GISEL-NEXT: orr x9, x16, x9, lsr #63 -; GISEL-NEXT: orr x11, x15, x11, lsr #63 -; GISEL-NEXT: stp x10, x8, [x0] -; GISEL-NEXT: lsl x8, x17, #1 -; GISEL-NEXT: lsl x16, x14, #1 -; GISEL-NEXT: lsl x10, x12, #1 -; GISEL-NEXT: stp x9, x11, [x0, #16] -; GISEL-NEXT: asr x9, x17, #63 -; GISEL-NEXT: orr x8, x8, x12, lsr #63 -; GISEL-NEXT: orr x13, x16, x13, lsr #63 -; GISEL-NEXT: orr x10, x10, x14, lsr #63 -; GISEL-NEXT: orr x9, x9, x9, lsl #1 -; GISEL-NEXT: stp x13, x10, [x0, #32] +; GISEL-NEXT: ldp x8, x9, [x1] +; GISEL-NEXT: ldp x10, x11, 
[x1, #16] +; GISEL-NEXT: ldp x12, x13, [x1, #32] +; GISEL-NEXT: extr x8, x9, x8, #63 +; GISEL-NEXT: ldp x14, x15, [x1, #48] +; GISEL-NEXT: extr x9, x10, x9, #63 +; GISEL-NEXT: extr x10, x11, x10, #63 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: extr x8, x12, x11, #63 +; GISEL-NEXT: extr x9, x13, x12, #63 +; GISEL-NEXT: extr x11, x14, x13, #63 +; GISEL-NEXT: stp x10, x8, [x0, #16] +; GISEL-NEXT: asr x10, x15, #63 +; GISEL-NEXT: extr x8, x15, x14, #63 +; GISEL-NEXT: stp x9, x11, [x0, #32] +; GISEL-NEXT: orr x9, x10, x10, lsl #1 ; GISEL-NEXT: stp x8, x9, [x0, #48] ; GISEL-NEXT: ret entry: @@ -5906,23 +5796,17 @@ define void @test_shl_i512_const_65(ptr %result, ptr %input) { ; GISEL-NEXT: ldr x15, [x1, #48] ; GISEL-NEXT: ldp x10, x11, [x1, #16] ; GISEL-NEXT: ldp x12, x13, [x1, #32] -; GISEL-NEXT: lsr x14, x8, #63 -; GISEL-NEXT: lsr x16, x9, #63 -; GISEL-NEXT: lsl x8, x8, #1 -; GISEL-NEXT: orr x9, x14, x9, lsl #1 -; GISEL-NEXT: lsr x14, x10, #63 -; GISEL-NEXT: orr x10, x16, x10, lsl #1 -; GISEL-NEXT: stp xzr, x8, [x0] -; GISEL-NEXT: lsr x8, x11, #63 -; GISEL-NEXT: orr x11, x14, x11, lsl #1 -; GISEL-NEXT: lsr x14, x12, #63 -; GISEL-NEXT: stp x9, x10, [x0, #16] -; GISEL-NEXT: lsr x9, x13, #63 -; GISEL-NEXT: orr x8, x8, x12, lsl #1 -; GISEL-NEXT: orr x10, x14, x13, lsl #1 -; GISEL-NEXT: orr x9, x9, x15, lsl #1 -; GISEL-NEXT: stp x11, x8, [x0, #32] -; GISEL-NEXT: stp x10, x9, [x0, #48] +; GISEL-NEXT: lsl x14, x8, #1 +; GISEL-NEXT: extr x8, x9, x8, #63 +; GISEL-NEXT: extr x9, x10, x9, #63 +; GISEL-NEXT: extr x10, x11, x10, #63 +; GISEL-NEXT: stp xzr, x14, [x0] +; GISEL-NEXT: stp x8, x9, [x0, #16] +; GISEL-NEXT: extr x8, x12, x11, #63 +; GISEL-NEXT: extr x9, x13, x12, #63 +; GISEL-NEXT: stp x10, x8, [x0, #32] +; GISEL-NEXT: extr x10, x15, x13, #63 +; GISEL-NEXT: stp x9, x10, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -5953,27 +5837,21 @@ define void @test_lshr_i512_const_65(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_lshr_i512_const_65: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #16] -; GISEL-NEXT: ldr x10, [x1, #8] -; GISEL-NEXT: ldp x11, x14, [x1, #32] -; GISEL-NEXT: ldp x15, x16, [x1, #48] -; GISEL-NEXT: lsl x12, x8, #63 -; GISEL-NEXT: lsl x13, x9, #63 -; GISEL-NEXT: orr x10, x12, x10, lsr #1 -; GISEL-NEXT: lsl x12, x11, #63 -; GISEL-NEXT: orr x8, x13, x8, lsr #1 -; GISEL-NEXT: lsl x13, x14, #63 -; GISEL-NEXT: orr x9, x12, x9, lsr #1 -; GISEL-NEXT: stp x10, x8, [x0] -; GISEL-NEXT: lsl x10, x15, #63 -; GISEL-NEXT: orr x11, x13, x11, lsr #1 -; GISEL-NEXT: lsl x12, x16, #63 -; GISEL-NEXT: orr x8, x10, x14, lsr #1 -; GISEL-NEXT: lsr x10, x16, #1 -; GISEL-NEXT: stp x9, x11, [x0, #16] -; GISEL-NEXT: orr x9, x12, x15, lsr #1 -; GISEL-NEXT: stp x10, xzr, [x0, #48] -; GISEL-NEXT: stp x8, x9, [x0, #32] +; GISEL-NEXT: ldp x8, x9, [x1, #8] +; GISEL-NEXT: ldr x14, [x1, #56] +; GISEL-NEXT: ldp x10, x11, [x1, #24] +; GISEL-NEXT: ldp x12, x13, [x1, #40] +; GISEL-NEXT: extr x8, x9, x8, #1 +; GISEL-NEXT: extr x9, x10, x9, #1 +; GISEL-NEXT: extr x10, x11, x10, #1 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: extr x8, x12, x11, #1 +; GISEL-NEXT: extr x9, x13, x12, #1 +; GISEL-NEXT: stp x10, x8, [x0, #16] +; GISEL-NEXT: extr x10, x14, x13, #1 +; GISEL-NEXT: lsr x8, x14, #1 +; GISEL-NEXT: stp x9, x10, [x0, #32] +; GISEL-NEXT: stp x8, xzr, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -6005,29 +5883,23 @@ define void @test_ashr_i512_const_65(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_ashr_i512_const_65: ; GISEL: ; 
%bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #16] -; GISEL-NEXT: ldr x11, [x1, #8] -; GISEL-NEXT: ldp x10, x13, [x1, #32] -; GISEL-NEXT: lsl x12, x8, #63 -; GISEL-NEXT: lsl x14, x9, #63 -; GISEL-NEXT: lsl x15, x10, #63 -; GISEL-NEXT: orr x11, x12, x11, lsr #1 -; GISEL-NEXT: ldp x12, x16, [x1, #48] -; GISEL-NEXT: orr x8, x14, x8, lsr #1 -; GISEL-NEXT: lsl x14, x13, #63 -; GISEL-NEXT: orr x9, x15, x9, lsr #1 -; GISEL-NEXT: asr x15, x16, #63 -; GISEL-NEXT: stp x11, x8, [x0] -; GISEL-NEXT: lsl x11, x12, #63 -; GISEL-NEXT: orr x10, x14, x10, lsr #1 -; GISEL-NEXT: lsl x14, x16, #63 -; GISEL-NEXT: orr x8, x11, x13, lsr #1 +; GISEL-NEXT: ldp x8, x9, [x1, #8] +; GISEL-NEXT: ldr x13, [x1, #40] +; GISEL-NEXT: ldp x10, x11, [x1, #24] +; GISEL-NEXT: ldp x14, x12, [x1, #48] +; GISEL-NEXT: extr x8, x9, x8, #1 +; GISEL-NEXT: extr x9, x10, x9, #1 +; GISEL-NEXT: extr x10, x11, x10, #1 +; GISEL-NEXT: asr x15, x12, #63 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: extr x8, x13, x11, #1 +; GISEL-NEXT: extr x9, x14, x13, #1 ; GISEL-NEXT: lsl x11, x15, #63 -; GISEL-NEXT: stp x9, x10, [x0, #16] -; GISEL-NEXT: orr x9, x14, x12, lsr #1 -; GISEL-NEXT: orr x10, x11, x16, asr #1 -; GISEL-NEXT: stp x8, x9, [x0, #32] -; GISEL-NEXT: stp x10, x15, [x0, #48] +; GISEL-NEXT: stp x10, x8, [x0, #16] +; GISEL-NEXT: extr x10, x12, x14, #1 +; GISEL-NEXT: orr x8, x11, x12, asr #1 +; GISEL-NEXT: stp x9, x10, [x0, #32] +; GISEL-NEXT: stp x8, x15, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -6062,23 +5934,17 @@ define void @test_shl_i512_const_100(ptr %result, ptr %input) { ; GISEL-NEXT: ldr x15, [x1, #48] ; GISEL-NEXT: ldp x10, x11, [x1, #16] ; GISEL-NEXT: ldp x12, x13, [x1, #32] -; GISEL-NEXT: lsr x14, x8, #28 -; GISEL-NEXT: lsr x16, x9, #28 -; GISEL-NEXT: lsl x8, x8, #36 -; GISEL-NEXT: orr x9, x14, x9, lsl #36 -; GISEL-NEXT: lsr x14, x10, #28 -; GISEL-NEXT: orr x10, x16, x10, lsl #36 -; GISEL-NEXT: stp xzr, x8, [x0] -; GISEL-NEXT: lsr x8, x11, #28 -; GISEL-NEXT: orr x11, x14, x11, lsl #36 -; GISEL-NEXT: lsr x14, x12, #28 -; GISEL-NEXT: stp x9, x10, [x0, #16] -; GISEL-NEXT: lsr x9, x13, #28 -; GISEL-NEXT: orr x8, x8, x12, lsl #36 -; GISEL-NEXT: orr x10, x14, x13, lsl #36 -; GISEL-NEXT: orr x9, x9, x15, lsl #36 -; GISEL-NEXT: stp x11, x8, [x0, #32] -; GISEL-NEXT: stp x10, x9, [x0, #48] +; GISEL-NEXT: lsl x14, x8, #36 +; GISEL-NEXT: extr x8, x9, x8, #28 +; GISEL-NEXT: extr x9, x10, x9, #28 +; GISEL-NEXT: extr x10, x11, x10, #28 +; GISEL-NEXT: stp xzr, x14, [x0] +; GISEL-NEXT: stp x8, x9, [x0, #16] +; GISEL-NEXT: extr x8, x12, x11, #28 +; GISEL-NEXT: extr x9, x13, x12, #28 +; GISEL-NEXT: stp x10, x8, [x0, #32] +; GISEL-NEXT: extr x10, x15, x13, #28 +; GISEL-NEXT: stp x9, x10, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -6109,27 +5975,21 @@ define void @test_lshr_i512_const_100(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_lshr_i512_const_100: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #16] -; GISEL-NEXT: ldr x10, [x1, #8] -; GISEL-NEXT: ldp x11, x14, [x1, #32] -; GISEL-NEXT: ldp x15, x16, [x1, #48] -; GISEL-NEXT: lsl x12, x8, #28 -; GISEL-NEXT: lsl x13, x9, #28 -; GISEL-NEXT: orr x10, x12, x10, lsr #36 -; GISEL-NEXT: lsl x12, x11, #28 -; GISEL-NEXT: orr x8, x13, x8, lsr #36 -; GISEL-NEXT: lsl x13, x14, #28 -; GISEL-NEXT: orr x9, x12, x9, lsr #36 -; GISEL-NEXT: stp x10, x8, [x0] -; GISEL-NEXT: lsl x10, x15, #28 -; GISEL-NEXT: orr x11, x13, x11, lsr #36 -; GISEL-NEXT: lsl x12, x16, #28 -; GISEL-NEXT: orr x8, x10, x14, lsr #36 -; 
GISEL-NEXT: lsr x10, x16, #36 -; GISEL-NEXT: stp x9, x11, [x0, #16] -; GISEL-NEXT: orr x9, x12, x15, lsr #36 -; GISEL-NEXT: stp x10, xzr, [x0, #48] -; GISEL-NEXT: stp x8, x9, [x0, #32] +; GISEL-NEXT: ldp x8, x9, [x1, #8] +; GISEL-NEXT: ldr x14, [x1, #56] +; GISEL-NEXT: ldp x10, x11, [x1, #24] +; GISEL-NEXT: ldp x12, x13, [x1, #40] +; GISEL-NEXT: extr x8, x9, x8, #36 +; GISEL-NEXT: extr x9, x10, x9, #36 +; GISEL-NEXT: extr x10, x11, x10, #36 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: extr x8, x12, x11, #36 +; GISEL-NEXT: extr x9, x13, x12, #36 +; GISEL-NEXT: stp x10, x8, [x0, #16] +; GISEL-NEXT: extr x10, x14, x13, #36 +; GISEL-NEXT: lsr x8, x14, #36 +; GISEL-NEXT: stp x9, x10, [x0, #32] +; GISEL-NEXT: stp x8, xzr, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -6161,29 +6021,23 @@ define void @test_ashr_i512_const_100(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_ashr_i512_const_100: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #16] -; GISEL-NEXT: ldr x11, [x1, #8] -; GISEL-NEXT: ldp x10, x13, [x1, #32] -; GISEL-NEXT: lsl x12, x8, #28 -; GISEL-NEXT: lsl x14, x9, #28 -; GISEL-NEXT: lsl x15, x10, #28 -; GISEL-NEXT: orr x11, x12, x11, lsr #36 -; GISEL-NEXT: ldp x12, x16, [x1, #48] -; GISEL-NEXT: orr x8, x14, x8, lsr #36 -; GISEL-NEXT: lsl x14, x13, #28 -; GISEL-NEXT: orr x9, x15, x9, lsr #36 -; GISEL-NEXT: asr x15, x16, #63 -; GISEL-NEXT: stp x11, x8, [x0] -; GISEL-NEXT: lsl x11, x12, #28 -; GISEL-NEXT: orr x10, x14, x10, lsr #36 -; GISEL-NEXT: lsl x14, x16, #28 -; GISEL-NEXT: orr x8, x11, x13, lsr #36 +; GISEL-NEXT: ldp x8, x9, [x1, #8] +; GISEL-NEXT: ldr x13, [x1, #40] +; GISEL-NEXT: ldp x10, x11, [x1, #24] +; GISEL-NEXT: ldp x14, x12, [x1, #48] +; GISEL-NEXT: extr x8, x9, x8, #36 +; GISEL-NEXT: extr x9, x10, x9, #36 +; GISEL-NEXT: extr x10, x11, x10, #36 +; GISEL-NEXT: asr x15, x12, #63 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: extr x8, x13, x11, #36 +; GISEL-NEXT: extr x9, x14, x13, #36 ; GISEL-NEXT: lsl x11, x15, #28 -; GISEL-NEXT: stp x9, x10, [x0, #16] -; GISEL-NEXT: orr x9, x14, x12, lsr #36 -; GISEL-NEXT: orr x10, x11, x16, asr #36 -; GISEL-NEXT: stp x8, x9, [x0, #32] -; GISEL-NEXT: stp x10, x15, [x0, #48] +; GISEL-NEXT: stp x10, x8, [x0, #16] +; GISEL-NEXT: extr x10, x12, x14, #36 +; GISEL-NEXT: orr x8, x11, x12, asr #36 +; GISEL-NEXT: stp x9, x10, [x0, #32] +; GISEL-NEXT: stp x8, x15, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -6219,23 +6073,17 @@ define void @test_shl_i512_const_127(ptr %result, ptr %input) { ; GISEL-NEXT: ldr x15, [x1, #48] ; GISEL-NEXT: ldp x10, x11, [x1, #16] ; GISEL-NEXT: ldp x12, x13, [x1, #32] -; GISEL-NEXT: lsr x14, x8, #1 -; GISEL-NEXT: lsr x16, x9, #1 -; GISEL-NEXT: lsl x8, x8, #63 -; GISEL-NEXT: orr x9, x14, x9, lsl #63 -; GISEL-NEXT: lsr x14, x10, #1 -; GISEL-NEXT: orr x10, x16, x10, lsl #63 -; GISEL-NEXT: stp xzr, x8, [x0] -; GISEL-NEXT: lsr x8, x11, #1 -; GISEL-NEXT: orr x11, x14, x11, lsl #63 -; GISEL-NEXT: lsr x14, x12, #1 -; GISEL-NEXT: stp x9, x10, [x0, #16] -; GISEL-NEXT: lsr x9, x13, #1 -; GISEL-NEXT: orr x8, x8, x12, lsl #63 -; GISEL-NEXT: orr x10, x14, x13, lsl #63 -; GISEL-NEXT: orr x9, x9, x15, lsl #63 -; GISEL-NEXT: stp x11, x8, [x0, #32] -; GISEL-NEXT: stp x10, x9, [x0, #48] +; GISEL-NEXT: lsl x14, x8, #63 +; GISEL-NEXT: extr x8, x9, x8, #1 +; GISEL-NEXT: extr x9, x10, x9, #1 +; GISEL-NEXT: extr x10, x11, x10, #1 +; GISEL-NEXT: stp xzr, x14, [x0] +; GISEL-NEXT: stp x8, x9, [x0, #16] +; GISEL-NEXT: extr x8, x12, x11, #1 +; GISEL-NEXT: extr 
x9, x13, x12, #1 +; GISEL-NEXT: stp x10, x8, [x0, #32] +; GISEL-NEXT: extr x10, x15, x13, #1 +; GISEL-NEXT: stp x9, x10, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -6266,27 +6114,21 @@ define void @test_lshr_i512_const_127(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_lshr_i512_const_127: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #16] -; GISEL-NEXT: ldr x10, [x1, #8] -; GISEL-NEXT: ldp x11, x14, [x1, #32] -; GISEL-NEXT: ldp x15, x16, [x1, #48] -; GISEL-NEXT: lsl x12, x8, #1 -; GISEL-NEXT: lsl x13, x9, #1 -; GISEL-NEXT: orr x10, x12, x10, lsr #63 -; GISEL-NEXT: lsl x12, x11, #1 -; GISEL-NEXT: orr x8, x13, x8, lsr #63 -; GISEL-NEXT: lsl x13, x14, #1 -; GISEL-NEXT: orr x9, x12, x9, lsr #63 -; GISEL-NEXT: stp x10, x8, [x0] -; GISEL-NEXT: lsl x10, x15, #1 -; GISEL-NEXT: orr x11, x13, x11, lsr #63 -; GISEL-NEXT: lsl x12, x16, #1 -; GISEL-NEXT: orr x8, x10, x14, lsr #63 -; GISEL-NEXT: lsr x10, x16, #63 -; GISEL-NEXT: stp x9, x11, [x0, #16] -; GISEL-NEXT: orr x9, x12, x15, lsr #63 -; GISEL-NEXT: stp x10, xzr, [x0, #48] -; GISEL-NEXT: stp x8, x9, [x0, #32] +; GISEL-NEXT: ldp x8, x9, [x1, #8] +; GISEL-NEXT: ldr x14, [x1, #56] +; GISEL-NEXT: ldp x10, x11, [x1, #24] +; GISEL-NEXT: ldp x12, x13, [x1, #40] +; GISEL-NEXT: extr x8, x9, x8, #63 +; GISEL-NEXT: extr x9, x10, x9, #63 +; GISEL-NEXT: extr x10, x11, x10, #63 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: extr x8, x12, x11, #63 +; GISEL-NEXT: extr x9, x13, x12, #63 +; GISEL-NEXT: stp x10, x8, [x0, #16] +; GISEL-NEXT: extr x10, x14, x13, #63 +; GISEL-NEXT: lsr x8, x14, #63 +; GISEL-NEXT: stp x9, x10, [x0, #32] +; GISEL-NEXT: stp x8, xzr, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 @@ -6317,28 +6159,22 @@ define void @test_ashr_i512_const_127(ptr %result, ptr %input) { ; ; GISEL-LABEL: test_ashr_i512_const_127: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: ldp x8, x9, [x1, #16] -; GISEL-NEXT: ldr x10, [x1, #8] -; GISEL-NEXT: ldp x11, x14, [x1, #32] -; GISEL-NEXT: ldp x15, x16, [x1, #48] -; GISEL-NEXT: lsl x12, x8, #1 -; GISEL-NEXT: lsl x13, x9, #1 -; GISEL-NEXT: orr x10, x12, x10, lsr #63 -; GISEL-NEXT: lsl x12, x11, #1 -; GISEL-NEXT: orr x8, x13, x8, lsr #63 -; GISEL-NEXT: lsl x13, x14, #1 -; GISEL-NEXT: orr x9, x12, x9, lsr #63 -; GISEL-NEXT: lsl x12, x15, #1 -; GISEL-NEXT: stp x10, x8, [x0] -; GISEL-NEXT: lsl x10, x16, #1 -; GISEL-NEXT: orr x11, x13, x11, lsr #63 -; GISEL-NEXT: asr x8, x16, #63 -; GISEL-NEXT: orr x12, x12, x14, lsr #63 -; GISEL-NEXT: stp x9, x11, [x0, #16] -; GISEL-NEXT: orr x9, x10, x15, lsr #63 -; GISEL-NEXT: orr x10, x8, x8, lsl #1 -; GISEL-NEXT: stp x12, x9, [x0, #32] -; GISEL-NEXT: stp x10, x8, [x0, #48] +; GISEL-NEXT: ldp x8, x9, [x1, #8] +; GISEL-NEXT: ldr x14, [x1, #56] +; GISEL-NEXT: ldp x10, x11, [x1, #24] +; GISEL-NEXT: ldp x12, x13, [x1, #40] +; GISEL-NEXT: extr x8, x9, x8, #63 +; GISEL-NEXT: extr x9, x10, x9, #63 +; GISEL-NEXT: extr x10, x11, x10, #63 +; GISEL-NEXT: stp x8, x9, [x0] +; GISEL-NEXT: extr x8, x12, x11, #63 +; GISEL-NEXT: asr x9, x14, #63 +; GISEL-NEXT: extr x11, x13, x12, #63 +; GISEL-NEXT: stp x10, x8, [x0, #16] +; GISEL-NEXT: extr x10, x14, x13, #63 +; GISEL-NEXT: orr x8, x9, x9, lsl #1 +; GISEL-NEXT: stp x11, x10, [x0, #32] +; GISEL-NEXT: stp x8, x9, [x0, #48] ; GISEL-NEXT: ret entry: %input_val = load i512, ptr %input, align 64 diff --git a/llvm/test/CodeGen/AArch64/adc.ll b/llvm/test/CodeGen/AArch64/adc.ll index 12e8bf2..03f3cf1 100644 --- a/llvm/test/CodeGen/AArch64/adc.ll +++ 
b/llvm/test/CodeGen/AArch64/adc.ll @@ -71,9 +71,8 @@ define i128 @test_shifted(i128 %a, i128 %b) { ; ; CHECK-GI-LABEL: test_shifted: ; CHECK-GI: ; %bb.0: -; CHECK-GI-NEXT: lsr x8, x2, #19 +; CHECK-GI-NEXT: extr x8, x3, x2, #19 ; CHECK-GI-NEXT: adds x0, x0, x2, lsl #45 -; CHECK-GI-NEXT: orr x8, x8, x3, lsl #45 ; CHECK-GI-NEXT: adc x1, x1, x8 ; CHECK-GI-NEXT: ret %rhs = shl i128 %b, 45 @@ -108,8 +107,7 @@ define i128 @test_extended(i128 %a, i16 %b) { ; CHECK-GI-NEXT: sxth x8, w2 ; CHECK-GI-NEXT: adds x0, x0, w2, sxth #3 ; CHECK-GI-NEXT: asr x9, x8, #63 -; CHECK-GI-NEXT: lsr x8, x8, #61 -; CHECK-GI-NEXT: orr x8, x8, x9, lsl #3 +; CHECK-GI-NEXT: extr x8, x9, x8, #61 ; CHECK-GI-NEXT: adc x1, x1, x8 ; CHECK-GI-NEXT: ret %ext = sext i16 %b to i128 diff --git a/llvm/test/CodeGen/AArch64/fsh.ll b/llvm/test/CodeGen/AArch64/fsh.ll index 765f6b7..7f07ef4 100644 --- a/llvm/test/CodeGen/AArch64/fsh.ll +++ b/llvm/test/CodeGen/AArch64/fsh.ll @@ -510,41 +510,40 @@ define i128 @fshl_i128(i128 %a, i128 %b, i128 %c) { ; ; CHECK-GI-LABEL: fshl_i128: ; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov w8, #64 // =0x40 ; CHECK-GI-NEXT: and x9, x4, #0x7f -; CHECK-GI-NEXT: mov w10, #64 // =0x40 -; CHECK-GI-NEXT: lsl x14, x3, #63 -; CHECK-GI-NEXT: sub x12, x10, x9 +; CHECK-GI-NEXT: mov w10, #127 // =0x7f +; CHECK-GI-NEXT: sub x12, x8, x9 ; CHECK-GI-NEXT: lsl x13, x1, x9 -; CHECK-GI-NEXT: mov w8, #127 // =0x7f +; CHECK-GI-NEXT: bic x10, x10, x4 ; CHECK-GI-NEXT: lsr x12, x0, x12 -; CHECK-GI-NEXT: bic x8, x8, x4 -; CHECK-GI-NEXT: sub x15, x9, #64 +; CHECK-GI-NEXT: sub x14, x9, #64 +; CHECK-GI-NEXT: lsl x15, x0, x9 +; CHECK-GI-NEXT: extr x16, x3, x2, #1 ; CHECK-GI-NEXT: cmp x9, #64 -; CHECK-GI-NEXT: lsl x9, x0, x9 -; CHECK-GI-NEXT: lsl x15, x0, x15 -; CHECK-GI-NEXT: orr x12, x12, x13 -; CHECK-GI-NEXT: orr x13, x14, x2, lsr #1 -; CHECK-GI-NEXT: lsr x14, x3, #1 -; CHECK-GI-NEXT: sub x10, x10, x8 -; CHECK-GI-NEXT: sub x16, x8, #64 -; CHECK-GI-NEXT: csel x9, x9, xzr, lo -; CHECK-GI-NEXT: lsr x17, x13, x8 -; CHECK-GI-NEXT: lsl x10, x14, x10 -; CHECK-GI-NEXT: csel x12, x12, x15, lo +; CHECK-GI-NEXT: sub x8, x8, x10 +; CHECK-GI-NEXT: orr x9, x12, x13 +; CHECK-GI-NEXT: lsr x12, x3, #1 +; CHECK-GI-NEXT: lsl x13, x0, x14 +; CHECK-GI-NEXT: csel x14, x15, xzr, lo +; CHECK-GI-NEXT: sub x15, x10, #64 +; CHECK-GI-NEXT: lsr x17, x16, x10 +; CHECK-GI-NEXT: lsl x8, x12, x8 +; CHECK-GI-NEXT: csel x9, x9, x13, lo ; CHECK-GI-NEXT: tst x4, #0x7f -; CHECK-GI-NEXT: lsr x15, x14, x16 +; CHECK-GI-NEXT: lsr x13, x12, x15 ; CHECK-GI-NEXT: mvn x11, x4 -; CHECK-GI-NEXT: csel x12, x1, x12, eq -; CHECK-GI-NEXT: orr x10, x17, x10 -; CHECK-GI-NEXT: cmp x8, #64 -; CHECK-GI-NEXT: lsr x14, x14, x8 -; CHECK-GI-NEXT: csel x10, x10, x15, lo +; CHECK-GI-NEXT: csel x9, x1, x9, eq +; CHECK-GI-NEXT: orr x8, x17, x8 +; CHECK-GI-NEXT: cmp x10, #64 +; CHECK-GI-NEXT: lsr x12, x12, x10 +; CHECK-GI-NEXT: csel x8, x8, x13, lo ; CHECK-GI-NEXT: tst x11, #0x7f -; CHECK-GI-NEXT: csel x10, x13, x10, eq -; CHECK-GI-NEXT: cmp x8, #64 -; CHECK-GI-NEXT: csel x8, x14, xzr, lo -; CHECK-GI-NEXT: orr x0, x9, x10 -; CHECK-GI-NEXT: orr x1, x12, x8 +; CHECK-GI-NEXT: csel x8, x16, x8, eq +; CHECK-GI-NEXT: cmp x10, #64 +; CHECK-GI-NEXT: csel x10, x12, xzr, lo +; CHECK-GI-NEXT: orr x0, x14, x8 +; CHECK-GI-NEXT: orr x1, x9, x10 ; CHECK-GI-NEXT: ret entry: %d = call i128 @llvm.fshl(i128 %a, i128 %b, i128 %c) @@ -571,41 +570,40 @@ define i128 @fshr_i128(i128 %a, i128 %b, i128 %c) { ; ; CHECK-GI-LABEL: fshr_i128: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: lsr x8, x0, #63 -; 
CHECK-GI-NEXT: mov w9, #127 // =0x7f -; CHECK-GI-NEXT: mov w10, #64 // =0x40 -; CHECK-GI-NEXT: bic x9, x9, x4 -; CHECK-GI-NEXT: lsl x11, x0, #1 -; CHECK-GI-NEXT: and x12, x4, #0x7f -; CHECK-GI-NEXT: orr x8, x8, x1, lsl #1 -; CHECK-GI-NEXT: sub x14, x10, x9 -; CHECK-GI-NEXT: sub x17, x9, #64 -; CHECK-GI-NEXT: lsl x15, x11, x9 -; CHECK-GI-NEXT: lsr x14, x11, x14 -; CHECK-GI-NEXT: cmp x9, #64 -; CHECK-GI-NEXT: lsl x16, x8, x9 -; CHECK-GI-NEXT: sub x9, x10, x12 -; CHECK-GI-NEXT: lsl x10, x11, x17 -; CHECK-GI-NEXT: mvn x13, x4 -; CHECK-GI-NEXT: csel x11, x15, xzr, lo -; CHECK-GI-NEXT: sub x15, x12, #64 -; CHECK-GI-NEXT: orr x14, x14, x16 -; CHECK-GI-NEXT: lsr x16, x2, x12 -; CHECK-GI-NEXT: lsl x9, x3, x9 -; CHECK-GI-NEXT: csel x10, x14, x10, lo -; CHECK-GI-NEXT: tst x13, #0x7f -; CHECK-GI-NEXT: lsr x13, x3, x15 -; CHECK-GI-NEXT: csel x8, x8, x10, eq -; CHECK-GI-NEXT: orr x9, x16, x9 -; CHECK-GI-NEXT: cmp x12, #64 -; CHECK-GI-NEXT: lsr x10, x3, x12 -; CHECK-GI-NEXT: csel x9, x9, x13, lo +; CHECK-GI-NEXT: mov w8, #127 // =0x7f +; CHECK-GI-NEXT: lsl x9, x0, #1 +; CHECK-GI-NEXT: extr x10, x1, x0, #63 +; CHECK-GI-NEXT: bic x8, x8, x4 +; CHECK-GI-NEXT: mov w11, #64 // =0x40 +; CHECK-GI-NEXT: and x14, x4, #0x7f +; CHECK-GI-NEXT: sub x12, x11, x8 +; CHECK-GI-NEXT: lsl x13, x10, x8 +; CHECK-GI-NEXT: lsl x16, x9, x8 +; CHECK-GI-NEXT: lsr x12, x9, x12 +; CHECK-GI-NEXT: sub x17, x8, #64 +; CHECK-GI-NEXT: cmp x8, #64 +; CHECK-GI-NEXT: lsl x8, x9, x17 +; CHECK-GI-NEXT: sub x11, x11, x14 +; CHECK-GI-NEXT: mvn x15, x4 +; CHECK-GI-NEXT: orr x12, x12, x13 +; CHECK-GI-NEXT: csel x9, x16, xzr, lo +; CHECK-GI-NEXT: sub x13, x14, #64 +; CHECK-GI-NEXT: lsr x16, x2, x14 +; CHECK-GI-NEXT: lsl x11, x3, x11 +; CHECK-GI-NEXT: csel x8, x12, x8, lo +; CHECK-GI-NEXT: tst x15, #0x7f +; CHECK-GI-NEXT: lsr x12, x3, x13 +; CHECK-GI-NEXT: csel x8, x10, x8, eq +; CHECK-GI-NEXT: orr x10, x16, x11 +; CHECK-GI-NEXT: cmp x14, #64 +; CHECK-GI-NEXT: lsr x11, x3, x14 +; CHECK-GI-NEXT: csel x10, x10, x12, lo ; CHECK-GI-NEXT: tst x4, #0x7f -; CHECK-GI-NEXT: csel x9, x2, x9, eq -; CHECK-GI-NEXT: cmp x12, #64 -; CHECK-GI-NEXT: csel x10, x10, xzr, lo -; CHECK-GI-NEXT: orr x0, x11, x9 -; CHECK-GI-NEXT: orr x1, x8, x10 +; CHECK-GI-NEXT: csel x10, x2, x10, eq +; CHECK-GI-NEXT: cmp x14, #64 +; CHECK-GI-NEXT: csel x11, x11, xzr, lo +; CHECK-GI-NEXT: orr x0, x9, x10 +; CHECK-GI-NEXT: orr x1, x8, x11 ; CHECK-GI-NEXT: ret entry: %d = call i128 @llvm.fshr(i128 %a, i128 %b, i128 %c) @@ -720,10 +718,9 @@ define i128 @rotl_i128_c(i128 %a) { ; ; CHECK-GI-LABEL: rotl_i128_c: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: lsr x8, x0, #61 -; CHECK-GI-NEXT: lsr x9, x1, #61 -; CHECK-GI-NEXT: orr x1, x8, x1, lsl #3 -; CHECK-GI-NEXT: orr x0, x9, x0, lsl #3 +; CHECK-GI-NEXT: extr x8, x1, x0, #61 +; CHECK-GI-NEXT: extr x0, x0, x1, #61 +; CHECK-GI-NEXT: mov x1, x8 ; CHECK-GI-NEXT: ret entry: %d = call i128 @llvm.fshl(i128 %a, i128 %a, i128 3) @@ -731,20 +728,12 @@ entry: } define i128 @rotr_i128_c(i128 %a) { -; CHECK-SD-LABEL: rotr_i128_c: -; CHECK-SD: // %bb.0: // %entry -; CHECK-SD-NEXT: extr x8, x1, x0, #3 -; CHECK-SD-NEXT: extr x1, x0, x1, #3 -; CHECK-SD-NEXT: mov x0, x8 -; CHECK-SD-NEXT: ret -; -; CHECK-GI-LABEL: rotr_i128_c: -; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: lsl x8, x1, #61 -; CHECK-GI-NEXT: lsl x9, x0, #61 -; CHECK-GI-NEXT: orr x0, x8, x0, lsr #3 -; CHECK-GI-NEXT: orr x1, x9, x1, lsr #3 -; CHECK-GI-NEXT: ret +; CHECK-LABEL: rotr_i128_c: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: extr x8, x1, x0, #3 +; CHECK-NEXT: extr x1, x0, x1, #3 +; 
CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: ret entry: %d = call i128 @llvm.fshr(i128 %a, i128 %a, i128 3) ret i128 %d @@ -868,10 +857,8 @@ define i128 @fshl_i128_c(i128 %a, i128 %b) { ; ; CHECK-GI-LABEL: fshl_i128_c: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: lsr x8, x0, #61 -; CHECK-GI-NEXT: lsr x9, x3, #61 -; CHECK-GI-NEXT: orr x1, x8, x1, lsl #3 -; CHECK-GI-NEXT: orr x0, x9, x0, lsl #3 +; CHECK-GI-NEXT: extr x1, x1, x0, #61 +; CHECK-GI-NEXT: extr x0, x0, x3, #61 ; CHECK-GI-NEXT: ret entry: %d = call i128 @llvm.fshl(i128 %a, i128 %b, i128 3) @@ -879,21 +866,12 @@ entry: } define i128 @fshr_i128_c(i128 %a, i128 %b) { -; CHECK-SD-LABEL: fshr_i128_c: -; CHECK-SD: // %bb.0: // %entry -; CHECK-SD-NEXT: extr x8, x3, x2, #3 -; CHECK-SD-NEXT: extr x1, x0, x3, #3 -; CHECK-SD-NEXT: mov x0, x8 -; CHECK-SD-NEXT: ret -; -; CHECK-GI-LABEL: fshr_i128_c: -; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: lsl x8, x3, #61 -; CHECK-GI-NEXT: lsr x9, x3, #3 -; CHECK-GI-NEXT: orr x8, x8, x2, lsr #3 -; CHECK-GI-NEXT: orr x1, x9, x0, lsl #61 -; CHECK-GI-NEXT: mov x0, x8 -; CHECK-GI-NEXT: ret +; CHECK-LABEL: fshr_i128_c: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: extr x8, x3, x2, #3 +; CHECK-NEXT: extr x1, x0, x3, #3 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: ret entry: %d = call i128 @llvm.fshr(i128 %a, i128 %b, i128 3) ret i128 %d @@ -3013,75 +2991,73 @@ define <2 x i128> @fshl_v2i128(<2 x i128> %a, <2 x i128> %b, <2 x i128> %c) { ; CHECK-GI-NEXT: .cfi_def_cfa_offset 16 ; CHECK-GI-NEXT: .cfi_offset w19, -16 ; CHECK-GI-NEXT: ldr x11, [sp, #16] -; CHECK-GI-NEXT: mov w10, #64 // =0x40 +; CHECK-GI-NEXT: mov w9, #64 // =0x40 ; CHECK-GI-NEXT: ldr x12, [sp, #32] ; CHECK-GI-NEXT: mov w13, #127 // =0x7f -; CHECK-GI-NEXT: and x9, x11, #0x7f +; CHECK-GI-NEXT: and x8, x11, #0x7f ; CHECK-GI-NEXT: and x14, x12, #0x7f -; CHECK-GI-NEXT: mvn x15, x11 -; CHECK-GI-NEXT: sub x8, x10, x9 -; CHECK-GI-NEXT: sub x16, x9, #64 -; CHECK-GI-NEXT: lsl x19, x1, x9 -; CHECK-GI-NEXT: lsr x18, x0, x8 -; CHECK-GI-NEXT: lsl x17, x0, x9 -; CHECK-GI-NEXT: lsl x16, x0, x16 -; CHECK-GI-NEXT: cmp x9, #64 -; CHECK-GI-NEXT: bic x0, x13, x11 -; CHECK-GI-NEXT: mvn x8, x12 -; CHECK-GI-NEXT: orr x18, x18, x19 -; CHECK-GI-NEXT: csel x9, x17, xzr, lo +; CHECK-GI-NEXT: mvn x18, x11 +; CHECK-GI-NEXT: sub x10, x9, x8 +; CHECK-GI-NEXT: sub x15, x8, #64 +; CHECK-GI-NEXT: lsl x17, x1, x8 +; CHECK-GI-NEXT: lsr x16, x0, x10 +; CHECK-GI-NEXT: lsl x15, x0, x15 +; CHECK-GI-NEXT: cmp x8, #64 +; CHECK-GI-NEXT: lsl x19, x0, x8 +; CHECK-GI-NEXT: lsl x0, x3, x14 +; CHECK-GI-NEXT: mvn x10, x12 +; CHECK-GI-NEXT: orr x16, x16, x17 ; CHECK-GI-NEXT: sub x17, x14, #64 -; CHECK-GI-NEXT: csel x16, x18, x16, lo +; CHECK-GI-NEXT: csel x15, x16, x15, lo +; CHECK-GI-NEXT: sub x16, x9, x14 +; CHECK-GI-NEXT: csel x8, x19, xzr, lo +; CHECK-GI-NEXT: lsr x16, x2, x16 ; CHECK-GI-NEXT: tst x11, #0x7f -; CHECK-GI-NEXT: sub x11, x10, x14 -; CHECK-GI-NEXT: lsr x11, x2, x11 -; CHECK-GI-NEXT: lsl x18, x3, x14 -; CHECK-GI-NEXT: csel x16, x1, x16, eq -; CHECK-GI-NEXT: lsl x1, x2, x14 +; CHECK-GI-NEXT: lsl x19, x2, x14 ; CHECK-GI-NEXT: lsl x17, x2, x17 +; CHECK-GI-NEXT: csel x15, x1, x15, eq ; CHECK-GI-NEXT: cmp x14, #64 -; CHECK-GI-NEXT: lsl x14, x5, #63 -; CHECK-GI-NEXT: orr x11, x11, x18 -; CHECK-GI-NEXT: bic x13, x13, x12 -; CHECK-GI-NEXT: csel x18, x1, xzr, lo -; CHECK-GI-NEXT: csel x11, x11, x17, lo +; CHECK-GI-NEXT: orr x16, x16, x0 +; CHECK-GI-NEXT: bic x11, x13, x11 +; CHECK-GI-NEXT: csel x14, x19, xzr, lo +; CHECK-GI-NEXT: csel x16, x16, x17, lo ; CHECK-GI-NEXT: tst x12, #0x7f -; 
CHECK-GI-NEXT: lsr x12, x5, #1 -; CHECK-GI-NEXT: orr x14, x14, x4, lsr #1 -; CHECK-GI-NEXT: lsl x17, x7, #63 -; CHECK-GI-NEXT: sub x1, x10, x0 -; CHECK-GI-NEXT: csel x11, x3, x11, eq -; CHECK-GI-NEXT: sub x2, x0, #64 -; CHECK-GI-NEXT: lsr x3, x14, x0 -; CHECK-GI-NEXT: lsl x1, x12, x1 -; CHECK-GI-NEXT: lsr x4, x7, #1 -; CHECK-GI-NEXT: orr x17, x17, x6, lsr #1 -; CHECK-GI-NEXT: lsr x2, x12, x2 -; CHECK-GI-NEXT: cmp x0, #64 -; CHECK-GI-NEXT: orr x1, x3, x1 -; CHECK-GI-NEXT: sub x10, x10, x13 -; CHECK-GI-NEXT: lsr x12, x12, x0 -; CHECK-GI-NEXT: csel x1, x1, x2, lo -; CHECK-GI-NEXT: tst x15, #0x7f -; CHECK-GI-NEXT: sub x15, x13, #64 -; CHECK-GI-NEXT: lsr x2, x17, x13 -; CHECK-GI-NEXT: lsl x10, x4, x10 -; CHECK-GI-NEXT: csel x14, x14, x1, eq -; CHECK-GI-NEXT: cmp x0, #64 -; CHECK-GI-NEXT: lsr x15, x4, x15 -; CHECK-GI-NEXT: lsr x0, x4, x13 -; CHECK-GI-NEXT: csel x12, x12, xzr, lo -; CHECK-GI-NEXT: orr x10, x2, x10 -; CHECK-GI-NEXT: cmp x13, #64 -; CHECK-GI-NEXT: csel x10, x10, x15, lo -; CHECK-GI-NEXT: tst x8, #0x7f -; CHECK-GI-NEXT: orr x1, x16, x12 -; CHECK-GI-NEXT: csel x8, x17, x10, eq -; CHECK-GI-NEXT: cmp x13, #64 -; CHECK-GI-NEXT: csel x10, x0, xzr, lo -; CHECK-GI-NEXT: orr x0, x9, x14 -; CHECK-GI-NEXT: orr x2, x18, x8 -; CHECK-GI-NEXT: orr x3, x11, x10 +; CHECK-GI-NEXT: lsr x17, x5, #1 +; CHECK-GI-NEXT: extr x0, x5, x4, #1 +; CHECK-GI-NEXT: bic x12, x13, x12 +; CHECK-GI-NEXT: csel x13, x3, x16, eq +; CHECK-GI-NEXT: sub x16, x9, x11 +; CHECK-GI-NEXT: sub x1, x11, #64 +; CHECK-GI-NEXT: lsr x3, x7, #1 +; CHECK-GI-NEXT: lsr x2, x0, x11 +; CHECK-GI-NEXT: lsl x16, x17, x16 +; CHECK-GI-NEXT: extr x4, x7, x6, #1 +; CHECK-GI-NEXT: lsr x1, x17, x1 +; CHECK-GI-NEXT: cmp x11, #64 +; CHECK-GI-NEXT: sub x9, x9, x12 +; CHECK-GI-NEXT: orr x16, x2, x16 +; CHECK-GI-NEXT: lsr x17, x17, x11 +; CHECK-GI-NEXT: lsl x9, x3, x9 +; CHECK-GI-NEXT: csel x16, x16, x1, lo +; CHECK-GI-NEXT: tst x18, #0x7f +; CHECK-GI-NEXT: sub x18, x12, #64 +; CHECK-GI-NEXT: lsr x1, x4, x12 +; CHECK-GI-NEXT: csel x16, x0, x16, eq +; CHECK-GI-NEXT: cmp x11, #64 +; CHECK-GI-NEXT: lsr x11, x3, x18 +; CHECK-GI-NEXT: csel x17, x17, xzr, lo +; CHECK-GI-NEXT: cmp x12, #64 +; CHECK-GI-NEXT: orr x9, x1, x9 +; CHECK-GI-NEXT: lsr x18, x3, x12 +; CHECK-GI-NEXT: orr x0, x8, x16 +; CHECK-GI-NEXT: csel x9, x9, x11, lo +; CHECK-GI-NEXT: tst x10, #0x7f +; CHECK-GI-NEXT: orr x1, x15, x17 +; CHECK-GI-NEXT: csel x9, x4, x9, eq +; CHECK-GI-NEXT: cmp x12, #64 +; CHECK-GI-NEXT: csel x10, x18, xzr, lo +; CHECK-GI-NEXT: orr x2, x14, x9 +; CHECK-GI-NEXT: orr x3, x13, x10 ; CHECK-GI-NEXT: ldr x19, [sp], #16 // 8-byte Folded Reload ; CHECK-GI-NEXT: ret entry: @@ -3125,75 +3101,73 @@ define <2 x i128> @fshr_v2i128(<2 x i128> %a, <2 x i128> %b, <2 x i128> %c) { ; CHECK-GI-LABEL: fshr_v2i128: ; CHECK-GI: // %bb.0: // %entry ; CHECK-GI-NEXT: ldr x9, [sp] -; CHECK-GI-NEXT: lsl x12, x1, #1 -; CHECK-GI-NEXT: mov w11, #127 // =0x7f -; CHECK-GI-NEXT: mov w14, #64 // =0x40 -; CHECK-GI-NEXT: lsl x15, x0, #1 +; CHECK-GI-NEXT: mov w10, #127 // =0x7f +; CHECK-GI-NEXT: mov w12, #64 // =0x40 +; CHECK-GI-NEXT: lsl x13, x0, #1 +; CHECK-GI-NEXT: extr x14, x1, x0, #63 ; CHECK-GI-NEXT: ldr x8, [sp, #16] -; CHECK-GI-NEXT: bic x13, x11, x9 -; CHECK-GI-NEXT: orr x12, x12, x0, lsr #63 -; CHECK-GI-NEXT: lsl x1, x3, #1 -; CHECK-GI-NEXT: sub x17, x14, x13 -; CHECK-GI-NEXT: sub x18, x13, #64 -; CHECK-GI-NEXT: lsl x3, x15, x13 -; CHECK-GI-NEXT: lsr x17, x15, x17 -; CHECK-GI-NEXT: lsl x0, x12, x13 -; CHECK-GI-NEXT: lsl x15, x15, x18 -; CHECK-GI-NEXT: bic x11, x11, x8 +; CHECK-GI-NEXT: bic x11, 
x10, x9 +; CHECK-GI-NEXT: mvn x16, x9 +; CHECK-GI-NEXT: and x15, x9, #0x7f +; CHECK-GI-NEXT: sub x17, x12, x11 +; CHECK-GI-NEXT: sub x18, x11, #64 +; CHECK-GI-NEXT: lsl x0, x14, x11 +; CHECK-GI-NEXT: lsr x17, x13, x17 +; CHECK-GI-NEXT: lsl x1, x13, x11 +; CHECK-GI-NEXT: lsl x13, x13, x18 +; CHECK-GI-NEXT: bic x10, x10, x8 ; CHECK-GI-NEXT: lsl x18, x2, #1 -; CHECK-GI-NEXT: cmp x13, #64 +; CHECK-GI-NEXT: cmp x11, #64 ; CHECK-GI-NEXT: orr x17, x17, x0 -; CHECK-GI-NEXT: orr x13, x1, x2, lsr #63 -; CHECK-GI-NEXT: mvn x16, x9 -; CHECK-GI-NEXT: csel x15, x17, x15, lo -; CHECK-GI-NEXT: sub x17, x14, x11 -; CHECK-GI-NEXT: csel x0, x3, xzr, lo +; CHECK-GI-NEXT: extr x11, x3, x2, #63 +; CHECK-GI-NEXT: csel x0, x1, xzr, lo +; CHECK-GI-NEXT: csel x13, x17, x13, lo +; CHECK-GI-NEXT: sub x17, x12, x10 ; CHECK-GI-NEXT: tst x16, #0x7f -; CHECK-GI-NEXT: sub x16, x11, #64 +; CHECK-GI-NEXT: sub x16, x10, #64 ; CHECK-GI-NEXT: lsr x17, x18, x17 -; CHECK-GI-NEXT: lsl x2, x13, x11 -; CHECK-GI-NEXT: lsl x1, x18, x11 -; CHECK-GI-NEXT: csel x12, x12, x15, eq -; CHECK-GI-NEXT: lsl x15, x18, x16 -; CHECK-GI-NEXT: and x10, x9, #0x7f -; CHECK-GI-NEXT: cmp x11, #64 -; CHECK-GI-NEXT: mvn x11, x8 +; CHECK-GI-NEXT: lsl x2, x11, x10 +; CHECK-GI-NEXT: lsl x1, x18, x10 +; CHECK-GI-NEXT: csel x13, x14, x13, eq +; CHECK-GI-NEXT: lsl x14, x18, x16 +; CHECK-GI-NEXT: cmp x10, #64 +; CHECK-GI-NEXT: mvn x10, x8 ; CHECK-GI-NEXT: orr x16, x17, x2 ; CHECK-GI-NEXT: csel x17, x1, xzr, lo -; CHECK-GI-NEXT: csel x15, x16, x15, lo -; CHECK-GI-NEXT: tst x11, #0x7f -; CHECK-GI-NEXT: sub x11, x14, x10 -; CHECK-GI-NEXT: sub x16, x10, #64 -; CHECK-GI-NEXT: lsr x18, x4, x10 -; CHECK-GI-NEXT: lsl x11, x5, x11 -; CHECK-GI-NEXT: csel x13, x13, x15, eq -; CHECK-GI-NEXT: lsr x15, x5, x16 +; CHECK-GI-NEXT: csel x14, x16, x14, lo +; CHECK-GI-NEXT: tst x10, #0x7f +; CHECK-GI-NEXT: sub x10, x12, x15 +; CHECK-GI-NEXT: sub x16, x15, #64 +; CHECK-GI-NEXT: lsr x18, x4, x15 +; CHECK-GI-NEXT: lsl x10, x5, x10 +; CHECK-GI-NEXT: csel x11, x11, x14, eq +; CHECK-GI-NEXT: lsr x14, x5, x16 ; CHECK-GI-NEXT: and x1, x8, #0x7f -; CHECK-GI-NEXT: orr x11, x18, x11 -; CHECK-GI-NEXT: cmp x10, #64 -; CHECK-GI-NEXT: lsr x16, x5, x10 -; CHECK-GI-NEXT: csel x11, x11, x15, lo +; CHECK-GI-NEXT: cmp x15, #64 +; CHECK-GI-NEXT: lsr x16, x5, x15 +; CHECK-GI-NEXT: orr x10, x18, x10 +; CHECK-GI-NEXT: csel x10, x10, x14, lo ; CHECK-GI-NEXT: tst x9, #0x7f -; CHECK-GI-NEXT: sub x9, x14, x1 -; CHECK-GI-NEXT: sub x14, x1, #64 -; CHECK-GI-NEXT: lsr x15, x6, x1 +; CHECK-GI-NEXT: sub x9, x12, x1 +; CHECK-GI-NEXT: sub x12, x1, #64 +; CHECK-GI-NEXT: lsr x14, x6, x1 ; CHECK-GI-NEXT: lsl x9, x7, x9 -; CHECK-GI-NEXT: csel x11, x4, x11, eq -; CHECK-GI-NEXT: cmp x10, #64 -; CHECK-GI-NEXT: lsr x10, x7, x14 -; CHECK-GI-NEXT: csel x14, x16, xzr, lo -; CHECK-GI-NEXT: orr x9, x15, x9 +; CHECK-GI-NEXT: csel x10, x4, x10, eq +; CHECK-GI-NEXT: cmp x15, #64 +; CHECK-GI-NEXT: lsr x12, x7, x12 +; CHECK-GI-NEXT: csel x15, x16, xzr, lo +; CHECK-GI-NEXT: orr x9, x14, x9 ; CHECK-GI-NEXT: cmp x1, #64 -; CHECK-GI-NEXT: lsr x15, x7, x1 -; CHECK-GI-NEXT: csel x9, x9, x10, lo +; CHECK-GI-NEXT: lsr x14, x7, x1 +; CHECK-GI-NEXT: csel x9, x9, x12, lo ; CHECK-GI-NEXT: tst x8, #0x7f ; CHECK-GI-NEXT: csel x8, x6, x9, eq ; CHECK-GI-NEXT: cmp x1, #64 -; CHECK-GI-NEXT: orr x0, x0, x11 -; CHECK-GI-NEXT: csel x9, x15, xzr, lo -; CHECK-GI-NEXT: orr x1, x12, x14 +; CHECK-GI-NEXT: orr x0, x0, x10 +; CHECK-GI-NEXT: csel x9, x14, xzr, lo +; CHECK-GI-NEXT: orr x1, x13, x15 ; CHECK-GI-NEXT: orr x2, x17, x8 -; CHECK-GI-NEXT: orr x3, x13, 
x9 +; CHECK-GI-NEXT: orr x3, x11, x9 ; CHECK-GI-NEXT: ret entry: %d = call <2 x i128> @llvm.fshr(<2 x i128> %a, <2 x i128> %b, <2 x i128> %c) @@ -3863,15 +3837,12 @@ define <2 x i128> @rotl_v2i128_c(<2 x i128> %a) { ; ; CHECK-GI-LABEL: rotl_v2i128_c: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: lsr x8, x1, #61 -; CHECK-GI-NEXT: lsl x9, x1, #3 -; CHECK-GI-NEXT: lsl x10, x3, #3 -; CHECK-GI-NEXT: lsr x11, x3, #61 -; CHECK-GI-NEXT: orr x8, x8, x0, lsl #3 -; CHECK-GI-NEXT: orr x1, x9, x0, lsr #61 -; CHECK-GI-NEXT: orr x3, x10, x2, lsr #61 -; CHECK-GI-NEXT: orr x2, x11, x2, lsl #3 +; CHECK-GI-NEXT: extr x8, x0, x1, #61 +; CHECK-GI-NEXT: extr x9, x3, x2, #61 +; CHECK-GI-NEXT: extr x1, x1, x0, #61 +; CHECK-GI-NEXT: extr x2, x2, x3, #61 ; CHECK-GI-NEXT: mov x0, x8 +; CHECK-GI-NEXT: mov x3, x9 ; CHECK-GI-NEXT: ret entry: %d = call <2 x i128> @llvm.fshl(<2 x i128> %a, <2 x i128> %a, <2 x i128> <i128 3, i128 3>) @@ -3891,14 +3862,12 @@ define <2 x i128> @rotr_v2i128_c(<2 x i128> %a) { ; ; CHECK-GI-LABEL: rotr_v2i128_c: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: lsl x8, x1, #61 -; CHECK-GI-NEXT: lsl x9, x3, #61 -; CHECK-GI-NEXT: lsl x10, x0, #61 -; CHECK-GI-NEXT: lsl x11, x2, #61 -; CHECK-GI-NEXT: orr x0, x8, x0, lsr #3 -; CHECK-GI-NEXT: orr x2, x9, x2, lsr #3 -; CHECK-GI-NEXT: orr x1, x10, x1, lsr #3 -; CHECK-GI-NEXT: orr x3, x11, x3, lsr #3 +; CHECK-GI-NEXT: extr x8, x1, x0, #3 +; CHECK-GI-NEXT: extr x9, x3, x2, #3 +; CHECK-GI-NEXT: extr x1, x0, x1, #3 +; CHECK-GI-NEXT: extr x3, x2, x3, #3 +; CHECK-GI-NEXT: mov x0, x8 +; CHECK-GI-NEXT: mov x2, x9 ; CHECK-GI-NEXT: ret entry: %d = call <2 x i128> @llvm.fshr(<2 x i128> %a, <2 x i128> %a, <2 x i128> <i128 3, i128 3>) @@ -4464,14 +4433,10 @@ define <2 x i128> @fshl_v2i128_c(<2 x i128> %a, <2 x i128> %b) { ; ; CHECK-GI-LABEL: fshl_v2i128_c: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: lsr x8, x5, #61 -; CHECK-GI-NEXT: lsl x9, x1, #3 -; CHECK-GI-NEXT: lsl x10, x3, #3 -; CHECK-GI-NEXT: lsr x11, x7, #61 -; CHECK-GI-NEXT: orr x8, x8, x0, lsl #3 -; CHECK-GI-NEXT: orr x1, x9, x0, lsr #61 -; CHECK-GI-NEXT: orr x3, x10, x2, lsr #61 -; CHECK-GI-NEXT: orr x2, x11, x2, lsl #3 +; CHECK-GI-NEXT: extr x8, x0, x5, #61 +; CHECK-GI-NEXT: extr x1, x1, x0, #61 +; CHECK-GI-NEXT: extr x3, x3, x2, #61 +; CHECK-GI-NEXT: extr x2, x2, x7, #61 ; CHECK-GI-NEXT: mov x0, x8 ; CHECK-GI-NEXT: ret entry: @@ -4480,29 +4445,15 @@ entry: } define <2 x i128> @fshr_v2i128_c(<2 x i128> %a, <2 x i128> %b) { -; CHECK-SD-LABEL: fshr_v2i128_c: -; CHECK-SD: // %bb.0: // %entry -; CHECK-SD-NEXT: extr x8, x5, x4, #3 -; CHECK-SD-NEXT: extr x9, x7, x6, #3 -; CHECK-SD-NEXT: extr x1, x0, x5, #3 -; CHECK-SD-NEXT: extr x3, x2, x7, #3 -; CHECK-SD-NEXT: mov x0, x8 -; CHECK-SD-NEXT: mov x2, x9 -; CHECK-SD-NEXT: ret -; -; CHECK-GI-LABEL: fshr_v2i128_c: -; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: lsl x8, x5, #61 -; CHECK-GI-NEXT: lsl x9, x7, #61 -; CHECK-GI-NEXT: lsr x10, x5, #3 -; CHECK-GI-NEXT: lsr x11, x7, #3 -; CHECK-GI-NEXT: orr x8, x8, x4, lsr #3 -; CHECK-GI-NEXT: orr x9, x9, x6, lsr #3 -; CHECK-GI-NEXT: orr x1, x10, x0, lsl #61 -; CHECK-GI-NEXT: orr x3, x11, x2, lsl #61 -; CHECK-GI-NEXT: mov x0, x8 -; CHECK-GI-NEXT: mov x2, x9 -; CHECK-GI-NEXT: ret +; CHECK-LABEL: fshr_v2i128_c: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: extr x8, x5, x4, #3 +; CHECK-NEXT: extr x9, x7, x6, #3 +; CHECK-NEXT: extr x1, x0, x5, #3 +; CHECK-NEXT: extr x3, x2, x7, #3 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: mov x2, x9 +; CHECK-NEXT: ret entry: %d = call <2 x i128> @llvm.fshr(<2 x i128> %a, <2 x i128> 
%b, <2 x i128> <i128 3, i128 3>) ret <2 x i128> %d diff --git a/llvm/test/CodeGen/AArch64/funnel-shift.ll b/llvm/test/CodeGen/AArch64/funnel-shift.ll index f9fd2ad..90fb102 100644 --- a/llvm/test/CodeGen/AArch64/funnel-shift.ll +++ b/llvm/test/CodeGen/AArch64/funnel-shift.ll @@ -85,41 +85,40 @@ define i128 @fshl_i128(i128 %x, i128 %y, i128 %z) nounwind { ; ; CHECK-GI-LABEL: fshl_i128: ; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #64 // =0x40 ; CHECK-GI-NEXT: and x9, x4, #0x7f -; CHECK-GI-NEXT: mov w10, #64 // =0x40 -; CHECK-GI-NEXT: lsl x14, x3, #63 -; CHECK-GI-NEXT: sub x12, x10, x9 +; CHECK-GI-NEXT: mov w10, #127 // =0x7f +; CHECK-GI-NEXT: sub x12, x8, x9 ; CHECK-GI-NEXT: lsl x13, x1, x9 -; CHECK-GI-NEXT: mov w8, #127 // =0x7f +; CHECK-GI-NEXT: bic x10, x10, x4 ; CHECK-GI-NEXT: lsr x12, x0, x12 -; CHECK-GI-NEXT: bic x8, x8, x4 -; CHECK-GI-NEXT: sub x15, x9, #64 +; CHECK-GI-NEXT: sub x14, x9, #64 +; CHECK-GI-NEXT: lsl x15, x0, x9 +; CHECK-GI-NEXT: extr x16, x3, x2, #1 ; CHECK-GI-NEXT: cmp x9, #64 -; CHECK-GI-NEXT: lsl x9, x0, x9 -; CHECK-GI-NEXT: lsl x15, x0, x15 -; CHECK-GI-NEXT: orr x12, x12, x13 -; CHECK-GI-NEXT: orr x13, x14, x2, lsr #1 -; CHECK-GI-NEXT: lsr x14, x3, #1 -; CHECK-GI-NEXT: sub x10, x10, x8 -; CHECK-GI-NEXT: sub x16, x8, #64 -; CHECK-GI-NEXT: csel x9, x9, xzr, lo -; CHECK-GI-NEXT: lsr x17, x13, x8 -; CHECK-GI-NEXT: lsl x10, x14, x10 -; CHECK-GI-NEXT: csel x12, x12, x15, lo +; CHECK-GI-NEXT: sub x8, x8, x10 +; CHECK-GI-NEXT: orr x9, x12, x13 +; CHECK-GI-NEXT: lsr x12, x3, #1 +; CHECK-GI-NEXT: lsl x13, x0, x14 +; CHECK-GI-NEXT: csel x14, x15, xzr, lo +; CHECK-GI-NEXT: sub x15, x10, #64 +; CHECK-GI-NEXT: lsr x17, x16, x10 +; CHECK-GI-NEXT: lsl x8, x12, x8 +; CHECK-GI-NEXT: csel x9, x9, x13, lo ; CHECK-GI-NEXT: tst x4, #0x7f -; CHECK-GI-NEXT: lsr x15, x14, x16 +; CHECK-GI-NEXT: lsr x13, x12, x15 ; CHECK-GI-NEXT: mvn x11, x4 -; CHECK-GI-NEXT: csel x12, x1, x12, eq -; CHECK-GI-NEXT: orr x10, x17, x10 -; CHECK-GI-NEXT: cmp x8, #64 -; CHECK-GI-NEXT: lsr x14, x14, x8 -; CHECK-GI-NEXT: csel x10, x10, x15, lo +; CHECK-GI-NEXT: csel x9, x1, x9, eq +; CHECK-GI-NEXT: orr x8, x17, x8 +; CHECK-GI-NEXT: cmp x10, #64 +; CHECK-GI-NEXT: lsr x12, x12, x10 +; CHECK-GI-NEXT: csel x8, x8, x13, lo ; CHECK-GI-NEXT: tst x11, #0x7f -; CHECK-GI-NEXT: csel x10, x13, x10, eq -; CHECK-GI-NEXT: cmp x8, #64 -; CHECK-GI-NEXT: csel x8, x14, xzr, lo -; CHECK-GI-NEXT: orr x0, x9, x10 -; CHECK-GI-NEXT: orr x1, x12, x8 +; CHECK-GI-NEXT: csel x8, x16, x8, eq +; CHECK-GI-NEXT: cmp x10, #64 +; CHECK-GI-NEXT: csel x10, x12, xzr, lo +; CHECK-GI-NEXT: orr x0, x14, x8 +; CHECK-GI-NEXT: orr x1, x9, x10 ; CHECK-GI-NEXT: ret %f = call i128 @llvm.fshl.i128(i128 %x, i128 %y, i128 %z) ret i128 %f diff --git a/llvm/test/CodeGen/AArch64/rem-by-const.ll b/llvm/test/CodeGen/AArch64/rem-by-const.ll index 1cb92e4..87b1108 100644 --- a/llvm/test/CodeGen/AArch64/rem-by-const.ll +++ b/llvm/test/CodeGen/AArch64/rem-by-const.ll @@ -559,20 +559,18 @@ define i128 @ui128_7(i128 %a, i128 %b) { ; CHECK-GI-NEXT: add x8, x8, x10 ; CHECK-GI-NEXT: subs x10, x0, x9 ; CHECK-GI-NEXT: sbc x11, x1, x8 -; CHECK-GI-NEXT: lsl x12, x11, #63 +; CHECK-GI-NEXT: extr x10, x11, x10, #1 ; CHECK-GI-NEXT: lsr x11, x11, #1 -; CHECK-GI-NEXT: orr x10, x12, x10, lsr #1 ; CHECK-GI-NEXT: adds x9, x10, x9 +; CHECK-GI-NEXT: mov w10, #7 // =0x7 ; CHECK-GI-NEXT: adc x8, x11, x8 -; CHECK-GI-NEXT: lsl x10, x8, #62 +; CHECK-GI-NEXT: extr x9, x8, x9, #2 ; CHECK-GI-NEXT: lsr x8, x8, #2 -; CHECK-GI-NEXT: orr x9, x10, x9, lsr #2 -; CHECK-GI-NEXT: mov w10, #7 // =0x7 -; 
CHECK-GI-NEXT: lsl x12, x8, #3 ; CHECK-GI-NEXT: umulh x10, x9, x10 ; CHECK-GI-NEXT: lsl x11, x9, #3 -; CHECK-GI-NEXT: sub x8, x12, x8 +; CHECK-GI-NEXT: lsl x12, x8, #3 ; CHECK-GI-NEXT: sub x9, x11, x9 +; CHECK-GI-NEXT: sub x8, x12, x8 ; CHECK-GI-NEXT: subs x0, x0, x9 ; CHECK-GI-NEXT: add x8, x8, x10 ; CHECK-GI-NEXT: sbc x1, x1, x8 @@ -640,10 +638,9 @@ define i128 @ui128_100(i128 %a, i128 %b) { ; CHECK-GI-NEXT: add x10, x11, x12 ; CHECK-GI-NEXT: add x8, x8, x14 ; CHECK-GI-NEXT: add x8, x8, x10 -; CHECK-GI-NEXT: lsl x10, x8, #60 -; CHECK-GI-NEXT: lsr x8, x8, #4 -; CHECK-GI-NEXT: orr x9, x10, x9, lsr #4 ; CHECK-GI-NEXT: mov w10, #100 // =0x64 +; CHECK-GI-NEXT: extr x9, x8, x9, #4 +; CHECK-GI-NEXT: lsr x8, x8, #4 ; CHECK-GI-NEXT: umulh x11, x9, x10 ; CHECK-GI-NEXT: mul x9, x9, x10 ; CHECK-GI-NEXT: madd x8, x8, x10, x11 @@ -3317,36 +3314,32 @@ define <2 x i128> @uv2i128_7(<2 x i128> %d, <2 x i128> %e) { ; CHECK-GI-NEXT: sbc x14, x1, x12 ; CHECK-GI-NEXT: add x8, x8, x13 ; CHECK-GI-NEXT: subs x13, x2, x10 -; CHECK-GI-NEXT: lsl x15, x14, #63 -; CHECK-GI-NEXT: sbc x16, x3, x8 +; CHECK-GI-NEXT: extr x9, x14, x9, #1 +; CHECK-GI-NEXT: sbc x15, x3, x8 ; CHECK-GI-NEXT: lsr x14, x14, #1 -; CHECK-GI-NEXT: orr x9, x15, x9, lsr #1 -; CHECK-GI-NEXT: lsl x15, x16, #63 -; CHECK-GI-NEXT: orr x13, x15, x13, lsr #1 +; CHECK-GI-NEXT: extr x13, x15, x13, #1 ; CHECK-GI-NEXT: adds x9, x9, x11 -; CHECK-GI-NEXT: lsr x11, x16, #1 +; CHECK-GI-NEXT: lsr x11, x15, #1 ; CHECK-GI-NEXT: adc x12, x14, x12 ; CHECK-GI-NEXT: adds x10, x13, x10 -; CHECK-GI-NEXT: lsl x13, x12, #62 -; CHECK-GI-NEXT: lsr x12, x12, #2 -; CHECK-GI-NEXT: adc x8, x11, x8 -; CHECK-GI-NEXT: lsl x11, x8, #62 -; CHECK-GI-NEXT: orr x9, x13, x9, lsr #2 +; CHECK-GI-NEXT: extr x9, x12, x9, #2 ; CHECK-GI-NEXT: mov w13, #7 // =0x7 +; CHECK-GI-NEXT: adc x8, x11, x8 +; CHECK-GI-NEXT: lsr x11, x12, #2 +; CHECK-GI-NEXT: extr x10, x8, x10, #2 +; CHECK-GI-NEXT: umulh x12, x9, x13 ; CHECK-GI-NEXT: lsr x8, x8, #2 -; CHECK-GI-NEXT: lsl x14, x12, #3 -; CHECK-GI-NEXT: orr x10, x11, x10, lsr #2 -; CHECK-GI-NEXT: umulh x11, x9, x13 +; CHECK-GI-NEXT: lsl x14, x11, #3 ; CHECK-GI-NEXT: lsl x15, x9, #3 -; CHECK-GI-NEXT: sub x12, x14, x12 -; CHECK-GI-NEXT: lsl x16, x8, #3 ; CHECK-GI-NEXT: umulh x13, x10, x13 +; CHECK-GI-NEXT: lsl x16, x8, #3 +; CHECK-GI-NEXT: sub x11, x14, x11 ; CHECK-GI-NEXT: lsl x14, x10, #3 ; CHECK-GI-NEXT: sub x9, x15, x9 ; CHECK-GI-NEXT: sub x8, x16, x8 ; CHECK-GI-NEXT: subs x0, x0, x9 +; CHECK-GI-NEXT: add x11, x11, x12 ; CHECK-GI-NEXT: sub x10, x14, x10 -; CHECK-GI-NEXT: add x11, x12, x11 ; CHECK-GI-NEXT: sbc x1, x1, x11 ; CHECK-GI-NEXT: subs x2, x2, x10 ; CHECK-GI-NEXT: add x8, x8, x13 @@ -3394,9 +3387,10 @@ define <2 x i128> @uv2i128_100(<2 x i128> %d, <2 x i128> %e) { ; CHECK-GI: // %bb.0: // %entry ; CHECK-GI-NEXT: mov x10, #23593 // =0x5c29 ; CHECK-GI-NEXT: mov x8, #62914 // =0xf5c2 -; CHECK-GI-NEXT: sub x18, x0, x0 +; CHECK-GI-NEXT: and x5, xzr, #0x1 ; CHECK-GI-NEXT: movk x10, #49807, lsl #16 ; CHECK-GI-NEXT: movk x8, #23592, lsl #16 +; CHECK-GI-NEXT: umulh x18, x0, xzr ; CHECK-GI-NEXT: movk x10, #10485, lsl #32 ; CHECK-GI-NEXT: movk x8, #49807, lsl #32 ; CHECK-GI-NEXT: movk x10, #36700, lsl #48 @@ -3409,84 +3403,81 @@ define <2 x i128> @uv2i128_100(<2 x i128> %d, <2 x i128> %e) { ; CHECK-GI-NEXT: umulh x15, x1, x10 ; CHECK-GI-NEXT: cset w12, hs ; CHECK-GI-NEXT: cmn x11, x13 -; CHECK-GI-NEXT: and x11, x12, #0x1 -; CHECK-GI-NEXT: umulh x16, x0, x8 -; CHECK-GI-NEXT: cset w12, hs +; CHECK-GI-NEXT: sub x13, x0, x0 ; CHECK-GI-NEXT: and x12, x12, #0x1 -; 
CHECK-GI-NEXT: add x14, x14, x18 -; CHECK-GI-NEXT: add x11, x11, x12 -; CHECK-GI-NEXT: and x12, xzr, #0x1 +; CHECK-GI-NEXT: umulh x16, x0, x8 +; CHECK-GI-NEXT: cset w11, hs +; CHECK-GI-NEXT: add x13, x14, x13 +; CHECK-GI-NEXT: and x11, x11, #0x1 +; CHECK-GI-NEXT: and x14, xzr, #0x1 ; CHECK-GI-NEXT: umulh x9, xzr, x10 -; CHECK-GI-NEXT: adds x14, x14, x15 -; CHECK-GI-NEXT: and x15, xzr, #0x1 +; CHECK-GI-NEXT: add x11, x12, x11 +; CHECK-GI-NEXT: add x12, x5, x14 +; CHECK-GI-NEXT: adds x13, x13, x15 ; CHECK-GI-NEXT: umulh x17, x1, x8 -; CHECK-GI-NEXT: cset w4, hs -; CHECK-GI-NEXT: add x15, x12, x15 -; CHECK-GI-NEXT: adds x12, x14, x16 -; CHECK-GI-NEXT: and x4, x4, #0x1 -; CHECK-GI-NEXT: mul x18, x3, x10 ; CHECK-GI-NEXT: cset w14, hs -; CHECK-GI-NEXT: adds x12, x12, x11 -; CHECK-GI-NEXT: add x11, x15, x4 ; CHECK-GI-NEXT: and x14, x14, #0x1 -; CHECK-GI-NEXT: cset w15, hs -; CHECK-GI-NEXT: mul x5, x2, x8 -; CHECK-GI-NEXT: add x11, x11, x14 -; CHECK-GI-NEXT: and x14, x15, #0x1 -; CHECK-GI-NEXT: add x17, x9, x17 -; CHECK-GI-NEXT: add x14, x11, x14 -; CHECK-GI-NEXT: mov w11, #100 // =0x64 -; CHECK-GI-NEXT: umulh x13, x0, xzr -; CHECK-GI-NEXT: umulh x16, x2, x10 -; CHECK-GI-NEXT: adds x18, x18, x5 -; CHECK-GI-NEXT: mul x15, x3, x8 -; CHECK-GI-NEXT: add x13, x17, x13 -; CHECK-GI-NEXT: cset w17, hs -; CHECK-GI-NEXT: umulh x10, x3, x10 -; CHECK-GI-NEXT: add x13, x13, x14 -; CHECK-GI-NEXT: and x17, x17, #0x1 -; CHECK-GI-NEXT: cmn x18, x16 -; CHECK-GI-NEXT: sub x18, x2, x2 -; CHECK-GI-NEXT: umulh x16, x2, x8 +; CHECK-GI-NEXT: adds x13, x13, x16 +; CHECK-GI-NEXT: mul x4, x3, x10 +; CHECK-GI-NEXT: add x12, x12, x14 ; CHECK-GI-NEXT: cset w14, hs -; CHECK-GI-NEXT: and x14, x14, #0x1 -; CHECK-GI-NEXT: add x15, x15, x18 +; CHECK-GI-NEXT: adds x11, x13, x11 +; CHECK-GI-NEXT: and x13, x14, #0x1 +; CHECK-GI-NEXT: mul x15, x2, x8 +; CHECK-GI-NEXT: cset w14, hs +; CHECK-GI-NEXT: add x12, x12, x13 +; CHECK-GI-NEXT: and x13, x14, #0x1 +; CHECK-GI-NEXT: add x14, x9, x17 +; CHECK-GI-NEXT: sub x17, x2, x2 +; CHECK-GI-NEXT: umulh x16, x2, x10 +; CHECK-GI-NEXT: add x12, x12, x13 +; CHECK-GI-NEXT: add x13, x14, x18 +; CHECK-GI-NEXT: add x12, x13, x12 ; CHECK-GI-NEXT: and x18, xzr, #0x1 -; CHECK-GI-NEXT: add x14, x17, x14 +; CHECK-GI-NEXT: mul x5, x3, x8 +; CHECK-GI-NEXT: extr x11, x12, x11, #4 +; CHECK-GI-NEXT: adds x13, x4, x15 +; CHECK-GI-NEXT: umulh x14, x3, x10 +; CHECK-GI-NEXT: cset w15, hs +; CHECK-GI-NEXT: mov w10, #100 // =0x64 +; CHECK-GI-NEXT: cmn x13, x16 +; CHECK-GI-NEXT: and x15, x15, #0x1 +; CHECK-GI-NEXT: umulh x13, x2, x8 +; CHECK-GI-NEXT: cset w16, hs +; CHECK-GI-NEXT: add x17, x5, x17 +; CHECK-GI-NEXT: and x16, x16, #0x1 ; CHECK-GI-NEXT: umulh x8, x3, x8 +; CHECK-GI-NEXT: add x15, x15, x16 +; CHECK-GI-NEXT: adds x14, x17, x14 ; CHECK-GI-NEXT: and x17, xzr, #0x1 -; CHECK-GI-NEXT: adds x10, x15, x10 -; CHECK-GI-NEXT: add x15, x17, x18 +; CHECK-GI-NEXT: add x16, x18, x17 ; CHECK-GI-NEXT: cset w17, hs -; CHECK-GI-NEXT: umulh x18, x2, xzr +; CHECK-GI-NEXT: adds x13, x14, x13 +; CHECK-GI-NEXT: umulh x14, x2, xzr ; CHECK-GI-NEXT: and x17, x17, #0x1 -; CHECK-GI-NEXT: adds x10, x10, x16 -; CHECK-GI-NEXT: lsl x16, x13, #60 -; CHECK-GI-NEXT: add x15, x15, x17 -; CHECK-GI-NEXT: cset w17, hs -; CHECK-GI-NEXT: adds x10, x10, x14 -; CHECK-GI-NEXT: and x14, x17, #0x1 +; CHECK-GI-NEXT: cset w18, hs +; CHECK-GI-NEXT: adds x13, x13, x15 +; CHECK-GI-NEXT: add x15, x16, x17 +; CHECK-GI-NEXT: and x16, x18, #0x1 ; CHECK-GI-NEXT: cset w17, hs ; CHECK-GI-NEXT: add x8, x9, x8 -; CHECK-GI-NEXT: add x14, x15, x14 -; CHECK-GI-NEXT: 
and x15, x17, #0x1 -; CHECK-GI-NEXT: orr x12, x16, x12, lsr #4 -; CHECK-GI-NEXT: add x9, x14, x15 -; CHECK-GI-NEXT: add x8, x8, x18 -; CHECK-GI-NEXT: add x8, x8, x9 -; CHECK-GI-NEXT: lsr x9, x13, #4 -; CHECK-GI-NEXT: umulh x14, x12, x11 -; CHECK-GI-NEXT: lsl x13, x8, #60 +; CHECK-GI-NEXT: add x15, x15, x16 +; CHECK-GI-NEXT: and x16, x17, #0x1 +; CHECK-GI-NEXT: lsr x9, x12, #4 +; CHECK-GI-NEXT: add x15, x15, x16 +; CHECK-GI-NEXT: umulh x17, x11, x10 +; CHECK-GI-NEXT: add x8, x8, x14 +; CHECK-GI-NEXT: add x8, x8, x15 +; CHECK-GI-NEXT: mul x11, x11, x10 +; CHECK-GI-NEXT: extr x12, x8, x13, #4 ; CHECK-GI-NEXT: lsr x8, x8, #4 -; CHECK-GI-NEXT: mul x12, x12, x11 -; CHECK-GI-NEXT: orr x10, x13, x10, lsr #4 -; CHECK-GI-NEXT: madd x9, x9, x11, x14 -; CHECK-GI-NEXT: umulh x13, x10, x11 -; CHECK-GI-NEXT: subs x0, x0, x12 -; CHECK-GI-NEXT: mul x10, x10, x11 +; CHECK-GI-NEXT: madd x9, x9, x10, x17 +; CHECK-GI-NEXT: umulh x13, x12, x10 +; CHECK-GI-NEXT: subs x0, x0, x11 +; CHECK-GI-NEXT: mul x12, x12, x10 ; CHECK-GI-NEXT: sbc x1, x1, x9 -; CHECK-GI-NEXT: madd x8, x8, x11, x13 -; CHECK-GI-NEXT: subs x2, x2, x10 +; CHECK-GI-NEXT: madd x8, x8, x10, x13 +; CHECK-GI-NEXT: subs x2, x2, x12 ; CHECK-GI-NEXT: sbc x3, x3, x8 ; CHECK-GI-NEXT: ret entry: diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.ll index 221e2fd..09e1fca 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.ll @@ -1200,7 +1200,7 @@ define amdgpu_ps void @atomic_cmpswap_i32_1d_no_return(<8 x i32> inreg %rsrc, i3 ; GFX6-NEXT: s_mov_b32 s5, s7 ; GFX6-NEXT: s_mov_b32 s6, s8 ; GFX6-NEXT: s_mov_b32 s7, s9 -; GFX6-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm glc +; GFX6-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm ; GFX6-NEXT: s_endpgm ; ; GFX8-LABEL: atomic_cmpswap_i32_1d_no_return: @@ -1213,7 +1213,7 @@ define amdgpu_ps void @atomic_cmpswap_i32_1d_no_return(<8 x i32> inreg %rsrc, i3 ; GFX8-NEXT: s_mov_b32 s5, s7 ; GFX8-NEXT: s_mov_b32 s6, s8 ; GFX8-NEXT: s_mov_b32 s7, s9 -; GFX8-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm glc +; GFX8-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm ; GFX8-NEXT: s_endpgm ; ; GFX900-LABEL: atomic_cmpswap_i32_1d_no_return: @@ -1226,7 +1226,7 @@ define amdgpu_ps void @atomic_cmpswap_i32_1d_no_return(<8 x i32> inreg %rsrc, i3 ; GFX900-NEXT: s_mov_b32 s5, s7 ; GFX900-NEXT: s_mov_b32 s6, s8 ; GFX900-NEXT: s_mov_b32 s7, s9 -; GFX900-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm glc +; GFX900-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm ; GFX900-NEXT: s_endpgm ; ; GFX90A-LABEL: atomic_cmpswap_i32_1d_no_return: @@ -1239,7 +1239,7 @@ define amdgpu_ps void @atomic_cmpswap_i32_1d_no_return(<8 x i32> inreg %rsrc, i3 ; GFX90A-NEXT: s_mov_b32 s5, s7 ; GFX90A-NEXT: s_mov_b32 s6, s8 ; GFX90A-NEXT: s_mov_b32 s7, s9 -; GFX90A-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm glc +; GFX90A-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 unorm ; GFX90A-NEXT: s_endpgm ; ; GFX10PLUS-LABEL: atomic_cmpswap_i32_1d_no_return: @@ -1252,7 +1252,7 @@ define amdgpu_ps void @atomic_cmpswap_i32_1d_no_return(<8 x i32> inreg %rsrc, i3 ; GFX10PLUS-NEXT: s_mov_b32 s5, s7 ; GFX10PLUS-NEXT: s_mov_b32 s6, s8 ; GFX10PLUS-NEXT: s_mov_b32 s7, s9 -; GFX10PLUS-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D unorm glc +; 
GFX10PLUS-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D unorm ; GFX10PLUS-NEXT: s_endpgm ; ; GFX12-LABEL: atomic_cmpswap_i32_1d_no_return: @@ -1265,7 +1265,7 @@ define amdgpu_ps void @atomic_cmpswap_i32_1d_no_return(<8 x i32> inreg %rsrc, i3 ; GFX12-NEXT: s_mov_b32 s5, s7 ; GFX12-NEXT: s_mov_b32 s6, s8 ; GFX12-NEXT: s_mov_b32 s7, s9 -; GFX12-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN +; GFX12-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D ; GFX12-NEXT: s_endpgm main_body: %v = call i32 @llvm.amdgcn.image.atomic.cmpswap.1d.i32.i32(i32 %cmp, i32 %swap, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) @@ -3194,7 +3194,7 @@ define amdgpu_ps void @atomic_cmpswap_i64_1d_no_return(<8 x i32> inreg %rsrc, i6 ; GFX6-NEXT: s_mov_b32 s5, s7 ; GFX6-NEXT: s_mov_b32 s6, s8 ; GFX6-NEXT: s_mov_b32 s7, s9 -; GFX6-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm glc +; GFX6-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm ; GFX6-NEXT: s_endpgm ; ; GFX8-LABEL: atomic_cmpswap_i64_1d_no_return: @@ -3207,7 +3207,7 @@ define amdgpu_ps void @atomic_cmpswap_i64_1d_no_return(<8 x i32> inreg %rsrc, i6 ; GFX8-NEXT: s_mov_b32 s5, s7 ; GFX8-NEXT: s_mov_b32 s6, s8 ; GFX8-NEXT: s_mov_b32 s7, s9 -; GFX8-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm glc +; GFX8-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm ; GFX8-NEXT: s_endpgm ; ; GFX900-LABEL: atomic_cmpswap_i64_1d_no_return: @@ -3220,7 +3220,7 @@ define amdgpu_ps void @atomic_cmpswap_i64_1d_no_return(<8 x i32> inreg %rsrc, i6 ; GFX900-NEXT: s_mov_b32 s5, s7 ; GFX900-NEXT: s_mov_b32 s6, s8 ; GFX900-NEXT: s_mov_b32 s7, s9 -; GFX900-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm glc +; GFX900-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm ; GFX900-NEXT: s_endpgm ; ; GFX90A-LABEL: atomic_cmpswap_i64_1d_no_return: @@ -3233,7 +3233,7 @@ define amdgpu_ps void @atomic_cmpswap_i64_1d_no_return(<8 x i32> inreg %rsrc, i6 ; GFX90A-NEXT: s_mov_b32 s5, s7 ; GFX90A-NEXT: s_mov_b32 s6, s8 ; GFX90A-NEXT: s_mov_b32 s7, s9 -; GFX90A-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm glc +; GFX90A-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf unorm ; GFX90A-NEXT: s_endpgm ; ; GFX10PLUS-LABEL: atomic_cmpswap_i64_1d_no_return: @@ -3246,7 +3246,7 @@ define amdgpu_ps void @atomic_cmpswap_i64_1d_no_return(<8 x i32> inreg %rsrc, i6 ; GFX10PLUS-NEXT: s_mov_b32 s5, s7 ; GFX10PLUS-NEXT: s_mov_b32 s6, s8 ; GFX10PLUS-NEXT: s_mov_b32 s7, s9 -; GFX10PLUS-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm glc +; GFX10PLUS-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm ; GFX10PLUS-NEXT: s_endpgm ; ; GFX12-LABEL: atomic_cmpswap_i64_1d_no_return: @@ -3259,7 +3259,7 @@ define amdgpu_ps void @atomic_cmpswap_i64_1d_no_return(<8 x i32> inreg %rsrc, i6 ; GFX12-NEXT: s_mov_b32 s5, s7 ; GFX12-NEXT: s_mov_b32 s6, s8 ; GFX12-NEXT: s_mov_b32 s7, s9 -; GFX12-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN +; GFX12-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D ; GFX12-NEXT: s_endpgm main_body: %v = call i64 @llvm.amdgcn.image.atomic.cmpswap.1d.i64.i32(i64 %cmp, i64 %swap, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.mir index 
292fa4b..4f160b6 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.mir @@ -25,6 +25,7 @@ body: | ; GFX6-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V2_V1_si]].sub0 ; GFX6-NEXT: $vgpr0 = COPY [[COPY3]] ; GFX6-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; ; GFX8-LABEL: name: atomic_cmpswap_i32_1d ; GFX8: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2 ; GFX8-NEXT: {{ $}} @@ -35,6 +36,7 @@ body: | ; GFX8-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V2_V1_vi]].sub0 ; GFX8-NEXT: $vgpr0 = COPY [[COPY3]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; ; GFX10-LABEL: name: atomic_cmpswap_i32_1d ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2 ; GFX10-NEXT: {{ $}} @@ -45,6 +47,7 @@ body: | ; GFX10-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx10_]].sub0 ; GFX10-NEXT: $vgpr0 = COPY [[COPY3]] ; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; ; GFX11-LABEL: name: atomic_cmpswap_i32_1d ; GFX11: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2 ; GFX11-NEXT: {{ $}} @@ -55,6 +58,7 @@ body: | ; GFX11-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx11_]].sub0 ; GFX11-NEXT: $vgpr0 = COPY [[COPY3]] ; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; ; GFX12-LABEL: name: atomic_cmpswap_i32_1d ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2 ; GFX12-NEXT: {{ $}} @@ -89,39 +93,43 @@ body: | ; GFX6-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 ; GFX6-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 ; GFX6-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX6-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V2_V1_si:%[0-9]+]]:vreg_64 = IMAGE_ATOMIC_CMPSWAP_V2_V1_si [[COPY1]], [[COPY2]], [[COPY]], 3, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8) + ; GFX6-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V2_V1_si [[COPY1]], [[COPY2]], [[COPY]], 3, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8) ; GFX6-NEXT: S_ENDPGM 0 + ; ; GFX8-LABEL: name: atomic_cmpswap_i32_1d_no_return ; GFX8: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2 ; GFX8-NEXT: {{ $}} ; GFX8-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 ; GFX8-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 ; GFX8-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX8-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V1_V1_vi:%[0-9]+]]:vreg_64 = IMAGE_ATOMIC_CMPSWAP_V2_V1_vi [[COPY1]], [[COPY2]], [[COPY]], 3, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8) + ; GFX8-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V2_V1_vi [[COPY1]], [[COPY2]], [[COPY]], 3, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 + ; ; GFX10-LABEL: name: atomic_cmpswap_i32_1d_no_return ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2 ; GFX10-NEXT: {{ $}} ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 ; GFX10-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 ; GFX10-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX10-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx10_:%[0-9]+]]:vreg_64 = 
IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx10 [[COPY1]], [[COPY2]], [[COPY]], 3, 0, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8) + ; GFX10-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V2_V1_gfx10 [[COPY1]], [[COPY2]], [[COPY]], 3, 0, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8) ; GFX10-NEXT: S_ENDPGM 0 + ; ; GFX11-LABEL: name: atomic_cmpswap_i32_1d_no_return ; GFX11: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2 ; GFX11-NEXT: {{ $}} ; GFX11-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 ; GFX11-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 ; GFX11-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX11-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx11_:%[0-9]+]]:vreg_64 = IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx11 [[COPY1]], [[COPY2]], [[COPY]], 3, 0, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8) + ; GFX11-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V2_V1_gfx11 [[COPY1]], [[COPY2]], [[COPY]], 3, 0, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8) ; GFX11-NEXT: S_ENDPGM 0 + ; ; GFX12-LABEL: name: atomic_cmpswap_i32_1d_no_return ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1, $vgpr2 ; GFX12-NEXT: {{ $}} ; GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx12_:%[0-9]+]]:vreg_64 = IMAGE_ATOMIC_CMPSWAP_V2_V1_gfx12 [[COPY1]], [[COPY2]], [[COPY]], 3, 0, 1, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8) + ; GFX12-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V2_V1_gfx12 [[COPY1]], [[COPY2]], [[COPY]], 3, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), addrspace 8) ; GFX12-NEXT: S_ENDPGM 0 %0:sgpr(<8 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 %1:vgpr(<2 x s32>) = COPY $vgpr0_vgpr1 @@ -150,6 +158,7 @@ body: | ; GFX6-NEXT: [[COPY3:%[0-9]+]]:vreg_64 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V4_V1_si]].sub0_sub1 ; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[COPY3]] ; GFX6-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0_vgpr1 + ; ; GFX8-LABEL: name: atomic_cmpswap_i64_1d ; GFX8: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4 ; GFX8-NEXT: {{ $}} @@ -160,6 +169,7 @@ body: | ; GFX8-NEXT: [[COPY3:%[0-9]+]]:vreg_64 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V4_V1_vi]].sub0_sub1 ; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[COPY3]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0_vgpr1 + ; ; GFX10-LABEL: name: atomic_cmpswap_i64_1d ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4 ; GFX10-NEXT: {{ $}} @@ -170,6 +180,7 @@ body: | ; GFX10-NEXT: [[COPY3:%[0-9]+]]:vreg_64 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx10_]].sub0_sub1 ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[COPY3]] ; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0_vgpr1 + ; ; GFX11-LABEL: name: atomic_cmpswap_i64_1d ; GFX11: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4 ; GFX11-NEXT: {{ $}} @@ -180,6 +191,7 @@ body: | ; GFX11-NEXT: [[COPY3:%[0-9]+]]:vreg_64 = COPY killed [[IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx11_]].sub0_sub1 ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[COPY3]] ; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0_vgpr1 + ; ; GFX12-LABEL: 
name: atomic_cmpswap_i64_1d ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4 ; GFX12-NEXT: {{ $}} @@ -214,39 +226,43 @@ body: | ; GFX6-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 ; GFX6-NEXT: [[COPY1:%[0-9]+]]:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX6-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX6-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V4_V1_si:%[0-9]+]]:vreg_128 = IMAGE_ATOMIC_CMPSWAP_V4_V1_si [[COPY1]], [[COPY2]], [[COPY]], 15, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8) + ; GFX6-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V4_V1_si [[COPY1]], [[COPY2]], [[COPY]], 15, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8) ; GFX6-NEXT: S_ENDPGM 0 + ; ; GFX8-LABEL: name: atomic_cmpswap_i64_1d_no_return ; GFX8: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4 ; GFX8-NEXT: {{ $}} ; GFX8-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 ; GFX8-NEXT: [[COPY1:%[0-9]+]]:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX8-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX8-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V4_V1_vi:%[0-9]+]]:vreg_128 = IMAGE_ATOMIC_CMPSWAP_V4_V1_vi [[COPY1]], [[COPY2]], [[COPY]], 15, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8) + ; GFX8-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V4_V1_vi [[COPY1]], [[COPY2]], [[COPY]], 15, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 + ; ; GFX10-LABEL: name: atomic_cmpswap_i64_1d_no_return ; GFX10: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4 ; GFX10-NEXT: {{ $}} ; GFX10-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 ; GFX10-NEXT: [[COPY1:%[0-9]+]]:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX10-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX10-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx10_:%[0-9]+]]:vreg_128 = IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx10 [[COPY1]], [[COPY2]], [[COPY]], 15, 0, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8) + ; GFX10-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V4_V1_gfx10 [[COPY1]], [[COPY2]], [[COPY]], 15, 0, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8) ; GFX10-NEXT: S_ENDPGM 0 + ; ; GFX11-LABEL: name: atomic_cmpswap_i64_1d_no_return ; GFX11: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4 ; GFX11-NEXT: {{ $}} ; GFX11-NEXT: [[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 ; GFX11-NEXT: [[COPY1:%[0-9]+]]:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX11-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX11-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx11_:%[0-9]+]]:vreg_128 = IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx11 [[COPY1]], [[COPY2]], [[COPY]], 15, 0, 1, 1, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8) + ; GFX11-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V4_V1_gfx11 [[COPY1]], [[COPY2]], [[COPY]], 15, 0, 1, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8) ; GFX11-NEXT: S_ENDPGM 0 + ; ; GFX12-LABEL: name: atomic_cmpswap_i64_1d_no_return ; GFX12: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4 ; GFX12-NEXT: {{ $}} ; GFX12-NEXT: 
[[COPY:%[0-9]+]]:sgpr_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx12_:%[0-9]+]]:vreg_128 = IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx12 [[COPY1]], [[COPY2]], [[COPY]], 15, 0, 1, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8) + ; GFX12-NEXT: IMAGE_ATOMIC_CMPSWAP_NORTN_V4_V1_gfx12 [[COPY1]], [[COPY2]], [[COPY]], 15, 0, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 8) ; GFX12-NEXT: S_ENDPGM 0 %0:sgpr(<8 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 %1:vgpr(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll index 6c4f504..33ce278 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll @@ -23,7 +23,9 @@ define protected amdgpu_kernel void @trivial_waterfall_eq_zero(ptr addrspace(1) ; PASS-CHECK: [[WHILE]]: ; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ] ; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true +; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]]) ; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true +; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 0 ; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]] ; PASS-CHECK: [[IF]]: ; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4 @@ -75,7 +77,9 @@ define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(ptr addrs ; PASS-CHECK: [[WHILE]]: ; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ] ; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true +; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]]) ; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true +; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp eq i64 0, [[BALLOT]] ; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]] ; PASS-CHECK: [[IF]]: ; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4 @@ -126,6 +130,8 @@ define protected amdgpu_kernel void @trivial_waterfall_ne_zero(ptr addrspace(1) ; PASS-CHECK-NEXT: br label %[[WHILE:.*]] ; PASS-CHECK: [[WHILE]]: ; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ] +; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]]) +; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp ne i64 0, [[BALLOT]] ; PASS-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]] ; PASS-CHECK: [[IF]]: ; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4 @@ -175,6 +181,8 @@ define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(ptr addrspac ; PASS-CHECK-NEXT: br label %[[WHILE:.*]] ; PASS-CHECK: [[WHILE]]: ; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ] +; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]]) +; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp ne i64 [[BALLOT]], 0 ; PASS-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]] ; PASS-CHECK: [[IF]]: ; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4 @@ -225,7 +233,9 @@ define protected amdgpu_kernel void 
@trivial_uniform_waterfall(ptr addrspace(1) ; PASS-CHECK: [[WHILE]]: ; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ] ; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true +; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]]) ; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true +; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 0 ; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF:.*]] ; PASS-CHECK: [[IF]]: ; PASS-CHECK-NEXT: [[IS_FIRST_ACTIVE_ID:%.*]] = icmp eq i32 0, 0 @@ -292,7 +302,9 @@ define protected amdgpu_kernel void @uniform_waterfall(ptr addrspace(1) %out, i3 ; PASS-CHECK: [[WHILE]]: ; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ] ; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true +; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]]) ; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true +; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 0 ; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF:.*]] ; PASS-CHECK: [[IF]]: ; PASS-CHECK-NEXT: [[IS_FIRST_ACTIVE_ID:%.*]] = icmp eq i32 [[MYMASK]], [[MYMASK]] @@ -359,7 +371,9 @@ define protected amdgpu_kernel void @trivial_waterfall_eq_zero_i32(ptr addrspace ; PASS-CHECK: [[WHILE]]: ; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ] ; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true +; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[NOT_DONE]]) ; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true +; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp eq i32 [[BALLOT]], 0 ; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]] ; PASS-CHECK: [[IF]]: ; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4 @@ -410,6 +424,8 @@ define protected amdgpu_kernel void @trivial_waterfall_ne_zero_i32(ptr addrspace ; PASS-CHECK-NEXT: br label %[[WHILE:.*]] ; PASS-CHECK: [[WHILE]]: ; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ] +; PASS-CHECK-NEXT: [[BALLOT:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[DONE]]) +; PASS-CHECK-NEXT: [[IS_DONE:%.*]] = icmp ne i32 0, [[BALLOT]] ; PASS-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]] ; PASS-CHECK: [[IF]]: ; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4 diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll index aa11574..a3e42e5 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll @@ -595,6 +595,8 @@ define amdgpu_kernel void @ballot_i32(i32 %v, ptr addrspace(1) %out) { ; PASS-CHECK-LABEL: define amdgpu_kernel void @ballot_i32( ; PASS-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] { ; PASS-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1 +; PASS-CHECK-NEXT: [[BALLOT:%.*]] = call i32 @llvm.amdgcn.ballot.i32(i1 [[C]]) +; PASS-CHECK-NEXT: [[BALLOT_NE_ZERO:%.*]] = icmp ne i32 [[BALLOT]], 0 ; PASS-CHECK-NEXT: store i1 [[C]], ptr addrspace(1) [[OUT]], align 1 ; PASS-CHECK-NEXT: ret void ; @@ -623,6 +625,8 @@ define amdgpu_kernel void @ballot_i64(i32 %v, ptr addrspace(1) %out) { ; PASS-CHECK-LABEL: define amdgpu_kernel void @ballot_i64( ; PASS-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] { ; 
PASS-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1 +; PASS-CHECK-NEXT: [[BALLOT:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[C]]) +; PASS-CHECK-NEXT: [[BALLOT_NE_ZERO:%.*]] = icmp ne i64 [[BALLOT]], 0 ; PASS-CHECK-NEXT: store i1 [[C]], ptr addrspace(1) [[OUT]], align 1 ; PASS-CHECK-NEXT: ret void ; diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.dim.gfx90a.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.dim.gfx90a.ll index 49607e3..83f0229 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.dim.gfx90a.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.dim.gfx90a.ll @@ -92,8 +92,7 @@ define amdgpu_ps void @atomic_swap_1d_agpr_noret(<8 x i32> inreg %rsrc, i32 %s) ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def a0 ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_accvgpr_read_b32 v1, a0 -; GFX90A-NEXT: image_atomic_swap v1, v0, s[0:7] dmask:0x1 unorm glc +; GFX90A-NEXT: image_atomic_swap a0, v0, s[0:7] dmask:0x1 unorm ; GFX90A-NEXT: s_endpgm %data = call i32 asm "; def $0", "=a"() %unused = call i32 @llvm.amdgcn.image.atomic.swap.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) @@ -106,8 +105,7 @@ define amdgpu_ps void @atomic_add_2d_agpr_noret(<8 x i32> inreg %rsrc, i32 %s, i ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def a0 ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 -; GFX90A-NEXT: image_atomic_add v2, v[0:1], s[0:7] dmask:0x1 unorm glc +; GFX90A-NEXT: image_atomic_add a0, v[0:1], s[0:7] dmask:0x1 unorm ; GFX90A-NEXT: s_endpgm %data = call i32 asm "; def $0", "=a"() %unused = call i32 @llvm.amdgcn.image.atomic.add.2d.i32.i32(i32 %data, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0) @@ -123,9 +121,7 @@ define amdgpu_ps void @atomic_cmpswap_1d_agpr_noret(<8 x i32> inreg %rsrc, i32 % ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def a1 ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 -; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 -; GFX90A-NEXT: image_atomic_cmpswap v[2:3], v0, s[0:7] dmask:0x3 unorm glc +; GFX90A-NEXT: image_atomic_cmpswap a[0:1], v0, s[0:7] dmask:0x3 unorm ; GFX90A-NEXT: s_endpgm %cmp = call i32 asm "; def $0", "=a"() %swap = call i32 asm "; def $0", "=a"() @@ -139,9 +135,7 @@ define amdgpu_ps void @atomic_swap_1d_i64_agpr_noret(<8 x i32> inreg %rsrc, i32 ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def a[0:1] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 -; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 -; GFX90A-NEXT: image_atomic_swap v[2:3], v0, s[0:7] dmask:0x3 unorm glc +; GFX90A-NEXT: image_atomic_swap a[0:1], v0, s[0:7] dmask:0x3 unorm ; GFX90A-NEXT: s_endpgm %data = call i64 asm "; def $0", "=a"() %unused = call i64 @llvm.amdgcn.image.atomic.swap.1d.i64.i32(i64 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) @@ -154,14 +148,10 @@ define amdgpu_ps void @atomic_cmpswap_1d_64_agpr_noret(<8 x i32> inreg %rsrc, i3 ; GFX90A-NEXT: ;;#ASMSTART ; GFX90A-NEXT: ; def a[0:1] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_accvgpr_read_b32 v3, a1 -; GFX90A-NEXT: v_accvgpr_read_b32 v2, a0 ; GFX90A-NEXT: ;;#ASMSTART -; GFX90A-NEXT: ; def a[0:1] +; GFX90A-NEXT: ; def a[2:3] ; GFX90A-NEXT: ;;#ASMEND -; GFX90A-NEXT: v_accvgpr_read_b32 v5, a1 -; GFX90A-NEXT: v_accvgpr_read_b32 v4, a0 -; GFX90A-NEXT: image_atomic_cmpswap v[2:5], v0, s[0:7] dmask:0xf unorm glc +; GFX90A-NEXT: image_atomic_cmpswap a[0:3], v0, s[0:7] dmask:0xf unorm ; GFX90A-NEXT: s_endpgm %cmp = call i64 asm "; def $0", "=a"() %swap = call i64 asm "; def $0", "=a"() diff --git 
a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.noret.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.noret.ll new file mode 100644 index 0000000..6c58a1a --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.noret.ll @@ -0,0 +1,581 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX10PLUS-GISE %s +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX10PLUS %s +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12-GISE %s +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12 %s + +define amdgpu_ps void @atomic_swap_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_swap_1d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_swap_1d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_swap_1d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_swap_1d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.swap.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_swap_1d_i64(<8 x i32> inreg %rsrc, i64 %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_swap_1d_i64: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_swap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_swap_1d_i64: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_swap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_swap_1d_i64: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_swap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_swap_1d_i64: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_swap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call i64 @llvm.amdgcn.image.atomic.swap.1d.i64.i32(i64 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_swap_1d_float(<8 x i32> inreg %rsrc, float %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_swap_1d_float: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_swap_1d_float: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_swap_1d_float: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_swap_1d_float: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_swap v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call float @llvm.amdgcn.image.atomic.swap.1d.f32.i32(float %data, i32 %s, <8 x i32> %rsrc, i32 0, 
i32 0) + ret void +} + +define amdgpu_ps void @atomic_add_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_add_1d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_add_1d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_add v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_add_1d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_add_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_add_1d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_add_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.add.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_sub_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_sub_1d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_sub v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_sub_1d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_sub v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_sub_1d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_sub_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_sub_1d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_sub_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.sub.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_smin_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_smin_1d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_smin v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_smin_1d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_smin v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_smin_1d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_min_int v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_smin_1d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_min_int v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.smin.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_umin_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_umin_1d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_umin v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_umin_1d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_umin v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_umin_1d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_min_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_umin_1d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_min_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = 
call i32 @llvm.amdgcn.image.atomic.umin.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_smax_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_smax_1d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_smax v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_smax_1d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_smax v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_smax_1d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_max_int v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_smax_1d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_max_int v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.smax.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_umax_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_umax_1d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_umax v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_umax_1d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_umax v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_umax_1d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_max_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_umax_1d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_max_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.umax.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_and_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_and_1d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_and v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_and_1d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_and v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_and_1d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_and v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_and_1d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_and v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.and.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_or_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_or_1d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_or v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_or_1d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_or v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_or_1d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_or v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_or_1d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_or v0, v1, 
s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.or.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_xor_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_xor_1d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_xor v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_xor_1d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_xor v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_xor_1d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_xor v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_xor_1d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_xor v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.xor.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_inc_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_inc_1d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_inc v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_inc_1d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_inc v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_inc_1d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_inc_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_inc_1d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_inc_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.inc.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_dec_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_dec_1d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_dec v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_dec_1d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_dec v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_dec_1d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_dec_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_dec_1d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_dec_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.dec.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_cmpswap_1d(<8 x i32> inreg %rsrc, i32 %cmp, i32 %swap, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_cmpswap_1d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_cmpswap_1d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_cmpswap_1d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D +; 
GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_cmpswap_1d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_cmpswap v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.cmpswap.1d.i32.i32(i32 %cmp, i32 %swap, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_cmpswap_1d_64(<8 x i32> inreg %rsrc, i64 %cmp, i64 %swap, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_cmpswap_1d_64: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_cmpswap_1d_64: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_cmpswap_1d_64: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_cmpswap_1d_64: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_cmpswap v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D +; GFX12-NEXT: s_endpgm + %v = call i64 @llvm.amdgcn.image.atomic.cmpswap.1d.i64.i32(i64 %cmp, i64 %swap, i32 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_add_2d(<8 x i32> inreg %rsrc, i32 %data, i32 %s, i32 %t) { +; GFX10PLUS-GISE-LABEL: atomic_add_2d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_add_2d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_add_2d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_add_uint v0, [v1, v2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_add_2d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_add_uint v0, [v1, v2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.add.2d.i32.i32(i32 %data, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_add_3d(<8 x i32> inreg %rsrc, i32 %data, i32 %s, i32 %t, i32 %r) { +; GFX10PLUS-GISE-LABEL: atomic_add_3d: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_3D unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_add_3d: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_3D unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_add_3d: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_3D +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_add_3d: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_3D +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.add.3d.i32.i32(i32 %data, i32 %s, i32 %t, i32 %r, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_add_cube(<8 x i32> inreg %rsrc, i32 %data, i32 %s, i32 %t, i32 %face) { +; GFX10PLUS-GISE-LABEL: atomic_add_cube: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_CUBE unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: 
atomic_add_cube: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_CUBE unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_add_cube: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_CUBE +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_add_cube: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_CUBE +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.add.cube.i32.i32(i32 %data, i32 %s, i32 %t, i32 %face, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_add_1darray(<8 x i32> inreg %rsrc, i32 %data, i32 %s, i32 %slice) { +; GFX10PLUS-GISE-LABEL: atomic_add_1darray: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D_ARRAY unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_add_1darray: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D_ARRAY unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_add_1darray: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_add_uint v0, [v1, v2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D_ARRAY +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_add_1darray: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_add_uint v0, [v1, v2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D_ARRAY +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.add.1darray.i32.i32(i32 %data, i32 %s, i32 %slice, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_add_2darray(<8 x i32> inreg %rsrc, i32 %data, i32 %s, i32 %t, i32 %slice) { +; GFX10PLUS-GISE-LABEL: atomic_add_2darray: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_add_2darray: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_add_2darray: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_add_2darray: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.add.2darray.i32.i32(i32 %data, i32 %s, i32 %t, i32 %slice, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_add_2dmsaa(<8 x i32> inreg %rsrc, i32 %data, i32 %s, i32 %t, i32 %fragid) { +; GFX10PLUS-GISE-LABEL: atomic_add_2dmsaa: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_add_2dmsaa: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_add v0, v[1:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_add_2dmsaa: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_add_2dmsaa: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_add_uint v0, [v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA 
+; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.add.2dmsaa.i32.i32(i32 %data, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_add_2darraymsaa(<8 x i32> inreg %rsrc, i32 %data, i32 %s, i32 %t, i32 %slice, i32 %fragid) { +; GFX10PLUS-GISE-LABEL: atomic_add_2darraymsaa: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v[1:4], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_add_2darraymsaa: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_add v0, v[1:4], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_add_2darraymsaa: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_add_uint v0, [v1, v2, v3, v4], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_add_2darraymsaa: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_add_uint v0, [v1, v2, v3, v4], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.add.2darraymsaa.i32.i32(i32 %data, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @atomic_add_1d_slc(<8 x i32> inreg %rsrc, i32 %data, i32 %s) { +; GFX10PLUS-GISE-LABEL: atomic_add_1d_slc: +; GFX10PLUS-GISE: ; %bb.0: +; GFX10PLUS-GISE-NEXT: image_atomic_add v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm slc +; GFX10PLUS-GISE-NEXT: s_endpgm +; +; GFX10PLUS-LABEL: atomic_add_1d_slc: +; GFX10PLUS: ; %bb.0: +; GFX10PLUS-NEXT: image_atomic_add v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm slc +; GFX10PLUS-NEXT: s_endpgm +; +; GFX12-GISE-LABEL: atomic_add_1d_slc: +; GFX12-GISE: ; %bb.0: +; GFX12-GISE-NEXT: image_atomic_add_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_NT +; GFX12-GISE-NEXT: s_endpgm +; +; GFX12-LABEL: atomic_add_1d_slc: +; GFX12: ; %bb.0: +; GFX12-NEXT: image_atomic_add_uint v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_NT +; GFX12-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.image.atomic.add.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, i32 2) + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.pk.add.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.pk.add.ll index 3d1d6c8..0ba62e4 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.pk.add.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.pk.add.ll @@ -41,15 +41,13 @@ main_body: define amdgpu_ps float @atomic_pk_add_f16_1d_v2_noret(<8 x i32> inreg %rsrc, <2 x half> %data, i32 %s) { ; GFX12-SDAG-LABEL: atomic_pk_add_f16_1d_v2_noret: ; GFX12-SDAG: ; %bb.0: ; %main_body -; GFX12-SDAG-NEXT: image_atomic_pk_add_f16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN -; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX12-SDAG-NEXT: image_atomic_pk_add_f16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D ; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 1.0 ; GFX12-SDAG-NEXT: ; return to shader part epilog ; ; GFX12-GISEL-LABEL: atomic_pk_add_f16_1d_v2_noret: ; GFX12-GISEL: ; %bb.0: ; %main_body -; GFX12-GISEL-NEXT: image_atomic_pk_add_f16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN -; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX12-GISEL-NEXT: image_atomic_pk_add_f16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D ; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, 1.0 ; GFX12-GISEL-NEXT: ; return to shader part epilog main_body: @@ -79,15 +77,13 @@ 
main_body: define amdgpu_ps float @atomic_pk_add_f16_1d_v4_noret(<8 x i32> inreg %rsrc, <4 x half> %data, i32 %s) { ; GFX12-SDAG-LABEL: atomic_pk_add_f16_1d_v4_noret: ; GFX12-SDAG: ; %bb.0: ; %main_body -; GFX12-SDAG-NEXT: image_atomic_pk_add_f16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN -; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX12-SDAG-NEXT: image_atomic_pk_add_f16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D ; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 1.0 ; GFX12-SDAG-NEXT: ; return to shader part epilog ; ; GFX12-GISEL-LABEL: atomic_pk_add_f16_1d_v4_noret: ; GFX12-GISEL: ; %bb.0: ; %main_body -; GFX12-GISEL-NEXT: image_atomic_pk_add_f16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN -; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX12-GISEL-NEXT: image_atomic_pk_add_f16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D ; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, 1.0 ; GFX12-GISEL-NEXT: ; return to shader part epilog main_body: @@ -126,15 +122,13 @@ main_body: define amdgpu_ps float @atomic_pk_add_bf16_1d_v2_noret(<8 x i32> inreg %rsrc, <2 x bfloat> %data, i32 %s) { ; GFX12-SDAG-LABEL: atomic_pk_add_bf16_1d_v2_noret: ; GFX12-SDAG: ; %bb.0: ; %main_body -; GFX12-SDAG-NEXT: image_atomic_pk_add_bf16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN -; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX12-SDAG-NEXT: image_atomic_pk_add_bf16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D ; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 1.0 ; GFX12-SDAG-NEXT: ; return to shader part epilog ; ; GFX12-GISEL-LABEL: atomic_pk_add_bf16_1d_v2_noret: ; GFX12-GISEL: ; %bb.0: ; %main_body -; GFX12-GISEL-NEXT: image_atomic_pk_add_bf16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN -; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX12-GISEL-NEXT: image_atomic_pk_add_bf16 v0, v1, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D ; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, 1.0 ; GFX12-GISEL-NEXT: ; return to shader part epilog main_body: @@ -173,15 +167,13 @@ main_body: define amdgpu_ps float @atomic_pk_add_bf16_1d_v4_noret(<8 x i32> inreg %rsrc, <4 x bfloat> %data, i32 %s) { ; GFX12-SDAG-LABEL: atomic_pk_add_bf16_1d_v4_noret: ; GFX12-SDAG: ; %bb.0: ; %main_body -; GFX12-SDAG-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN -; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX12-SDAG-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D ; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 1.0 ; GFX12-SDAG-NEXT: ; return to shader part epilog ; ; GFX12-GISEL-LABEL: atomic_pk_add_bf16_1d_v4_noret: ; GFX12-GISEL: ; %bb.0: ; %main_body -; GFX12-GISEL-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_RETURN -; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX12-GISEL-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D ; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, 1.0 ; GFX12-GISEL-NEXT: ; return to shader part epilog main_body: @@ -192,15 +184,13 @@ main_body: define amdgpu_ps float @atomic_pk_add_bf16_1d_v4_nt(<8 x i32> inreg %rsrc, <4 x bfloat> %data, i32 %s) { ; GFX12-SDAG-LABEL: atomic_pk_add_bf16_1d_v4_nt: ; GFX12-SDAG: ; %bb.0: ; %main_body -; GFX12-SDAG-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_NT_RETURN -; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX12-SDAG-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_NT ; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, 1.0 ; GFX12-SDAG-NEXT: ; return to shader part 
epilog
 ;
 ; GFX12-GISEL-LABEL: atomic_pk_add_bf16_1d_v4_nt:
 ; GFX12-GISEL: ; %bb.0: ; %main_body
-; GFX12-GISEL-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_NT_RETURN
-; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: image_atomic_pk_add_bf16 v[0:1], v2, s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_1D th:TH_ATOMIC_NT
 ; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, 1.0
 ; GFX12-GISEL-NEXT: ; return to shader part epilog
 main_body:
diff --git a/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll b/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll
index 7a876f6..3544017 100644
--- a/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll
+++ b/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll
@@ -76,6 +76,20 @@ entry:
   ret i32 %ret
 }
 
+define noundef i32 @wave_reduce_min(i32 noundef %x) {
+entry:
+  ; CHECK: Function wave_reduce_min : [[WAVE_FLAG]]
+  %ret = call i32 @llvm.dx.wave.reduce.min.i32(i32 %x)
+  ret i32 %ret
+}
+
+define noundef i32 @wave_reduce_umin(i32 noundef %x) {
+entry:
+  ; CHECK: Function wave_reduce_umin : [[WAVE_FLAG]]
+  %ret = call i32 @llvm.dx.wave.reduce.umin.i32(i32 %x)
+  ret i32 %ret
+}
+
 define void @wave_active_countbits(i1 %expr) {
 entry:
   ; CHECK: Function wave_active_countbits : [[WAVE_FLAG]]
diff --git a/llvm/test/CodeGen/DirectX/WaveActiveMin.ll b/llvm/test/CodeGen/DirectX/WaveActiveMin.ll
new file mode 100644
index 0000000..24fde48
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/WaveActiveMin.ll
@@ -0,0 +1,143 @@
+; RUN: opt -S -scalarizer -dxil-op-lower -mtriple=dxil-pc-shadermodel6.3-library < %s | FileCheck %s
+
+; Test that for scalar values, WaveActiveMin maps down to the DirectX op
+
+define noundef half @wave_active_min_half(half noundef %expr) {
+entry:
+; CHECK: call half @dx.op.waveActiveOp.f16(i32 119, half %expr, i8 2, i8 0){{$}}
+  %ret = call half @llvm.dx.wave.reduce.min.f16(half %expr)
+  ret half %ret
+}
+
+define noundef float @wave_active_min_float(float noundef %expr) {
+entry:
+; CHECK: call float @dx.op.waveActiveOp.f32(i32 119, float %expr, i8 2, i8 0){{$}}
+  %ret = call float @llvm.dx.wave.reduce.min.f32(float %expr)
+  ret float %ret
+}
+
+define noundef double @wave_active_min_double(double noundef %expr) {
+entry:
+; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr, i8 2, i8 0){{$}}
+  %ret = call double @llvm.dx.wave.reduce.min.f64(double %expr)
+  ret double %ret
+}
+
+define noundef i16 @wave_active_min_i16(i16 noundef %expr) {
+entry:
+; CHECK: call i16 @dx.op.waveActiveOp.i16(i32 119, i16 %expr, i8 2, i8 0){{$}}
+  %ret = call i16 @llvm.dx.wave.reduce.min.i16(i16 %expr)
+  ret i16 %ret
+}
+
+define noundef i32 @wave_active_min_i32(i32 noundef %expr) {
+entry:
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr, i8 2, i8 0){{$}}
+  %ret = call i32 @llvm.dx.wave.reduce.min.i32(i32 %expr)
+  ret i32 %ret
+}
+
+define noundef i64 @wave_active_min_i64(i64 noundef %expr) {
+entry:
+; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr, i8 2, i8 0){{$}}
+  %ret = call i64 @llvm.dx.wave.reduce.min.i64(i64 %expr)
+  ret i64 %ret
+}
+
+define noundef i16 @wave_active_umin_i16(i16 noundef %expr) {
+entry:
+; CHECK: call i16 @dx.op.waveActiveOp.i16(i32 119, i16 %expr, i8 2, i8 1){{$}}
+  %ret = call i16 @llvm.dx.wave.reduce.umin.i16(i16 %expr)
+  ret i16 %ret
+}
+
+define noundef i32 @wave_active_umin_i32(i32 noundef %expr) {
+entry:
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr, i8 2, i8 1){{$}}
+  %ret = call i32 @llvm.dx.wave.reduce.umin.i32(i32 %expr)
+  ret i32 %ret
+}
+
+define noundef i64 @wave_active_umin_i64(i64 noundef %expr) {
+entry:
+; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr, i8 2, i8 1){{$}}
+  %ret = call i64 @llvm.dx.wave.reduce.umin.i64(i64 %expr)
+  ret i64 %ret
+}
+
+declare half @llvm.dx.wave.reduce.min.f16(half)
+declare float @llvm.dx.wave.reduce.min.f32(float)
+declare double @llvm.dx.wave.reduce.min.f64(double)
+
+declare i16 @llvm.dx.wave.reduce.min.i16(i16)
+declare i32 @llvm.dx.wave.reduce.min.i32(i32)
+declare i64 @llvm.dx.wave.reduce.min.i64(i64)
+
+declare i16 @llvm.dx.wave.reduce.umin.i16(i16)
+declare i32 @llvm.dx.wave.reduce.umin.i32(i32)
+declare i64 @llvm.dx.wave.reduce.umin.i64(i64)
+
+; Test that for vector values, WaveActiveMin scalarizes and maps down to the
+; DirectX op
+
+define noundef <2 x half> @wave_active_min_v2half(<2 x half> noundef %expr) {
+entry:
+; CHECK: call half @dx.op.waveActiveOp.f16(i32 119, half %expr.i0, i8 2, i8 0){{$}}
+; CHECK: call half @dx.op.waveActiveOp.f16(i32 119, half %expr.i1, i8 2, i8 0){{$}}
+  %ret = call <2 x half> @llvm.dx.wave.reduce.min.v2f16(<2 x half> %expr)
+  ret <2 x half> %ret
+}
+
+define noundef <3 x i32> @wave_active_min_v3i32(<3 x i32> noundef %expr) {
+entry:
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i0, i8 2, i8 0){{$}}
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i1, i8 2, i8 0){{$}}
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i2, i8 2, i8 0){{$}}
+  %ret = call <3 x i32> @llvm.dx.wave.reduce.min.v3i32(<3 x i32> %expr)
+  ret <3 x i32> %ret
+}
+
+define noundef <4 x double> @wave_active_min_v4f64(<4 x double> noundef %expr) {
+entry:
+; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr.i0, i8 2, i8 0){{$}}
+; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr.i1, i8 2, i8 0){{$}}
+; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr.i2, i8 2, i8 0){{$}}
+; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr.i3, i8 2, i8 0){{$}}
+  %ret = call <4 x double> @llvm.dx.wave.reduce.min.v4f64(<4 x double> %expr)
+  ret <4 x double> %ret
+}
+
+declare <2 x half> @llvm.dx.wave.reduce.min.v2f16(<2 x half>)
+declare <3 x i32> @llvm.dx.wave.reduce.min.v3i32(<3 x i32>)
+declare <4 x double> @llvm.dx.wave.reduce.min.v4f64(<4 x double>)
+
+define noundef <2 x i16> @wave_active_umin_v2i16(<2 x i16> noundef %expr) {
+entry:
+; CHECK: call i16 @dx.op.waveActiveOp.i16(i32 119, i16 %expr.i0, i8 2, i8 1){{$}}
+; CHECK: call i16 @dx.op.waveActiveOp.i16(i32 119, i16 %expr.i1, i8 2, i8 1){{$}}
+  %ret = call <2 x i16> @llvm.dx.wave.reduce.umin.v2f16(<2 x i16> %expr)
+  ret <2 x i16> %ret
+}
+
+define noundef <3 x i32> @wave_active_umin_v3i32(<3 x i32> noundef %expr) {
+entry:
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i0, i8 2, i8 1){{$}}
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i1, i8 2, i8 1){{$}}
+; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i2, i8 2, i8 1){{$}}
+  %ret = call <3 x i32> @llvm.dx.wave.reduce.umin.v3i32(<3 x i32> %expr)
+  ret <3 x i32> %ret
+}
+
+define noundef <4 x i64> @wave_active_umin_v4f64(<4 x i64> noundef %expr) {
+entry:
+; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr.i0, i8 2, i8 1){{$}}
+; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr.i1, i8 2, i8 1){{$}}
+; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr.i2, i8 2, i8 1){{$}}
+; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr.i3, i8 2, i8 1){{$}}
+  %ret = call <4 x i64> @llvm.dx.wave.reduce.umin.v4f64(<4
x i64> %expr) + ret <4 x i64> %ret +} + +declare <2 x i16> @llvm.dx.wave.reduce.umin.v2f16(<2 x i16>) +declare <3 x i32> @llvm.dx.wave.reduce.umin.v3i32(<3 x i32>) +declare <4 x i64> @llvm.dx.wave.reduce.umin.v4f64(<4 x i64>) diff --git a/llvm/test/CodeGen/LoongArch/lasx/fp-max-min.ll b/llvm/test/CodeGen/LoongArch/lasx/fp-max-min.ll new file mode 100644 index 0000000..48ec98c --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/fp-max-min.ll @@ -0,0 +1,160 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s + +define void @minnum_v8f32(ptr %res, ptr %x, ptr %y) nounwind { +; CHECK-LABEL: minnum_v8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a2, 0 +; CHECK-NEXT: xvld $xr1, $a1, 0 +; CHECK-NEXT: xvpickve.w $xr2, $xr0, 5 +; CHECK-NEXT: xvpickve.w $xr3, $xr1, 5 +; CHECK-NEXT: fmin.s $fa2, $fa3, $fa2 +; CHECK-NEXT: xvpickve.w $xr3, $xr0, 4 +; CHECK-NEXT: xvpickve.w $xr4, $xr1, 4 +; CHECK-NEXT: fmin.s $fa3, $fa4, $fa3 +; CHECK-NEXT: vextrins.w $vr3, $vr2, 16 +; CHECK-NEXT: xvpickve.w $xr2, $xr0, 6 +; CHECK-NEXT: xvpickve.w $xr4, $xr1, 6 +; CHECK-NEXT: fmin.s $fa2, $fa4, $fa2 +; CHECK-NEXT: vextrins.w $vr3, $vr2, 32 +; CHECK-NEXT: xvpickve.w $xr2, $xr0, 7 +; CHECK-NEXT: xvpickve.w $xr4, $xr1, 7 +; CHECK-NEXT: fmin.s $fa2, $fa4, $fa2 +; CHECK-NEXT: vextrins.w $vr3, $vr2, 48 +; CHECK-NEXT: xvpickve.w $xr2, $xr0, 1 +; CHECK-NEXT: xvpickve.w $xr4, $xr1, 1 +; CHECK-NEXT: fmin.s $fa2, $fa4, $fa2 +; CHECK-NEXT: xvpickve.w $xr4, $xr0, 0 +; CHECK-NEXT: xvpickve.w $xr5, $xr1, 0 +; CHECK-NEXT: fmin.s $fa4, $fa5, $fa4 +; CHECK-NEXT: vextrins.w $vr4, $vr2, 16 +; CHECK-NEXT: xvpickve.w $xr2, $xr0, 2 +; CHECK-NEXT: xvpickve.w $xr5, $xr1, 2 +; CHECK-NEXT: fmin.s $fa2, $fa5, $fa2 +; CHECK-NEXT: vextrins.w $vr4, $vr2, 32 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 3 +; CHECK-NEXT: xvpickve.w $xr1, $xr1, 3 +; CHECK-NEXT: fmin.s $fa0, $fa1, $fa0 +; CHECK-NEXT: vextrins.w $vr4, $vr0, 48 +; CHECK-NEXT: xvpermi.q $xr4, $xr3, 2 +; CHECK-NEXT: xvst $xr4, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <8 x float>, ptr %x + %v1 = load <8 x float>, ptr %y + %r = call <8 x float> @llvm.minnum.v8f32(<8 x float> %v0, <8 x float> %v1) + store <8 x float> %r, ptr %res + ret void +} + +define void @minnum_v4f64(ptr %res, ptr %x, ptr %y) nounwind { +; CHECK-LABEL: minnum_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a2, 0 +; CHECK-NEXT: xvld $xr1, $a1, 0 +; CHECK-NEXT: xvpickve.d $xr2, $xr0, 3 +; CHECK-NEXT: xvpickve.d $xr3, $xr1, 3 +; CHECK-NEXT: fmin.d $fa2, $fa3, $fa2 +; CHECK-NEXT: xvpickve.d $xr3, $xr0, 2 +; CHECK-NEXT: xvpickve.d $xr4, $xr1, 2 +; CHECK-NEXT: fmin.d $fa3, $fa4, $fa3 +; CHECK-NEXT: vextrins.d $vr3, $vr2, 16 +; CHECK-NEXT: xvpickve.d $xr2, $xr0, 1 +; CHECK-NEXT: xvpickve.d $xr4, $xr1, 1 +; CHECK-NEXT: fmin.d $fa2, $fa4, $fa2 +; CHECK-NEXT: xvpickve.d $xr0, $xr0, 0 +; CHECK-NEXT: xvpickve.d $xr1, $xr1, 0 +; CHECK-NEXT: fmin.d $fa0, $fa1, $fa0 +; CHECK-NEXT: vextrins.d $vr0, $vr2, 16 +; CHECK-NEXT: xvpermi.q $xr0, $xr3, 2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <4 x double>, ptr %x + %v1 = load <4 x double>, ptr %y + %r = call <4 x double> @llvm.minnum.v4f64(<4 x double> %v0, <4 x double> %v1) + store <4 x double> %r, ptr %res + ret void +} + +define void @maxnum_v8f32(ptr %res, ptr %x, ptr %y) nounwind { +; CHECK-LABEL: maxnum_v8f32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: xvld $xr0, $a2, 0 +; CHECK-NEXT: xvld $xr1, $a1, 0 +; CHECK-NEXT: xvpickve.w $xr2, $xr0, 5 +; CHECK-NEXT: xvpickve.w $xr3, $xr1, 5 +; CHECK-NEXT: fmax.s $fa2, $fa3, $fa2 +; CHECK-NEXT: xvpickve.w $xr3, $xr0, 4 +; CHECK-NEXT: xvpickve.w $xr4, $xr1, 4 +; CHECK-NEXT: fmax.s $fa3, $fa4, $fa3 +; CHECK-NEXT: vextrins.w $vr3, $vr2, 16 +; CHECK-NEXT: xvpickve.w $xr2, $xr0, 6 +; CHECK-NEXT: xvpickve.w $xr4, $xr1, 6 +; CHECK-NEXT: fmax.s $fa2, $fa4, $fa2 +; CHECK-NEXT: vextrins.w $vr3, $vr2, 32 +; CHECK-NEXT: xvpickve.w $xr2, $xr0, 7 +; CHECK-NEXT: xvpickve.w $xr4, $xr1, 7 +; CHECK-NEXT: fmax.s $fa2, $fa4, $fa2 +; CHECK-NEXT: vextrins.w $vr3, $vr2, 48 +; CHECK-NEXT: xvpickve.w $xr2, $xr0, 1 +; CHECK-NEXT: xvpickve.w $xr4, $xr1, 1 +; CHECK-NEXT: fmax.s $fa2, $fa4, $fa2 +; CHECK-NEXT: xvpickve.w $xr4, $xr0, 0 +; CHECK-NEXT: xvpickve.w $xr5, $xr1, 0 +; CHECK-NEXT: fmax.s $fa4, $fa5, $fa4 +; CHECK-NEXT: vextrins.w $vr4, $vr2, 16 +; CHECK-NEXT: xvpickve.w $xr2, $xr0, 2 +; CHECK-NEXT: xvpickve.w $xr5, $xr1, 2 +; CHECK-NEXT: fmax.s $fa2, $fa5, $fa2 +; CHECK-NEXT: vextrins.w $vr4, $vr2, 32 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 3 +; CHECK-NEXT: xvpickve.w $xr1, $xr1, 3 +; CHECK-NEXT: fmax.s $fa0, $fa1, $fa0 +; CHECK-NEXT: vextrins.w $vr4, $vr0, 48 +; CHECK-NEXT: xvpermi.q $xr4, $xr3, 2 +; CHECK-NEXT: xvst $xr4, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <8 x float>, ptr %x + %v1 = load <8 x float>, ptr %y + %r = call <8 x float> @llvm.maxnum.v8f32(<8 x float> %v0, <8 x float> %v1) + store <8 x float> %r, ptr %res + ret void +} + +define void @maxnum_v4f64(ptr %res, ptr %x, ptr %y) nounwind { +; CHECK-LABEL: maxnum_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a2, 0 +; CHECK-NEXT: xvld $xr1, $a1, 0 +; CHECK-NEXT: xvpickve.d $xr2, $xr0, 3 +; CHECK-NEXT: xvpickve.d $xr3, $xr1, 3 +; CHECK-NEXT: fmax.d $fa2, $fa3, $fa2 +; CHECK-NEXT: xvpickve.d $xr3, $xr0, 2 +; CHECK-NEXT: xvpickve.d $xr4, $xr1, 2 +; CHECK-NEXT: fmax.d $fa3, $fa4, $fa3 +; CHECK-NEXT: vextrins.d $vr3, $vr2, 16 +; CHECK-NEXT: xvpickve.d $xr2, $xr0, 1 +; CHECK-NEXT: xvpickve.d $xr4, $xr1, 1 +; CHECK-NEXT: fmax.d $fa2, $fa4, $fa2 +; CHECK-NEXT: xvpickve.d $xr0, $xr0, 0 +; CHECK-NEXT: xvpickve.d $xr1, $xr1, 0 +; CHECK-NEXT: fmax.d $fa0, $fa1, $fa0 +; CHECK-NEXT: vextrins.d $vr0, $vr2, 16 +; CHECK-NEXT: xvpermi.q $xr0, $xr3, 2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <4 x double>, ptr %x + %v1 = load <4 x double>, ptr %y + %r = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %v0, <4 x double> %v1) + store <4 x double> %r, ptr %res + ret void +} + +declare <8 x float> @llvm.minnum.v8f32(<8 x float>, <8 x float>) +declare <4 x double> @llvm.minnum.v4f64(<4 x double>, <4 x double>) +declare <8 x float> @llvm.maxnum.v8f32(<8 x float>, <8 x float>) +declare <4 x double> @llvm.maxnum.v4f64(<4 x double>, <4 x double>) diff --git a/llvm/test/CodeGen/LoongArch/lsx/fp-max-min.ll b/llvm/test/CodeGen/LoongArch/lsx/fp-max-min.ll new file mode 100644 index 0000000..27ecb75 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/fp-max-min.ll @@ -0,0 +1,112 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s + +define void @minnum_v4f32(ptr %res, ptr %x, ptr %y) nounwind { +; CHECK-LABEL: minnum_v4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a2, 0 +; CHECK-NEXT: vld $vr1, $a1, 0 +; CHECK-NEXT: vreplvei.w 
$vr2, $vr0, 1 +; CHECK-NEXT: vreplvei.w $vr3, $vr1, 1 +; CHECK-NEXT: fmin.s $fa2, $fa3, $fa2 +; CHECK-NEXT: vreplvei.w $vr3, $vr0, 0 +; CHECK-NEXT: vreplvei.w $vr4, $vr1, 0 +; CHECK-NEXT: fmin.s $fa3, $fa4, $fa3 +; CHECK-NEXT: vextrins.w $vr3, $vr2, 16 +; CHECK-NEXT: vreplvei.w $vr2, $vr0, 2 +; CHECK-NEXT: vreplvei.w $vr4, $vr1, 2 +; CHECK-NEXT: fmin.s $fa2, $fa4, $fa2 +; CHECK-NEXT: vextrins.w $vr3, $vr2, 32 +; CHECK-NEXT: vreplvei.w $vr0, $vr0, 3 +; CHECK-NEXT: vreplvei.w $vr1, $vr1, 3 +; CHECK-NEXT: fmin.s $fa0, $fa1, $fa0 +; CHECK-NEXT: vextrins.w $vr3, $vr0, 48 +; CHECK-NEXT: vst $vr3, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <4 x float>, ptr %x + %v1 = load <4 x float>, ptr %y + %r = call <4 x float> @llvm.minnum.v4f32(<4 x float> %v0, <4 x float> %v1) + store <4 x float> %r, ptr %res + ret void +} + +define void @minnum_v2f64(ptr %res, ptr %x, ptr %y) nounwind { +; CHECK-LABEL: minnum_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a2, 0 +; CHECK-NEXT: vld $vr1, $a1, 0 +; CHECK-NEXT: vreplvei.d $vr2, $vr0, 1 +; CHECK-NEXT: vreplvei.d $vr3, $vr1, 1 +; CHECK-NEXT: fmin.d $fa2, $fa3, $fa2 +; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0 +; CHECK-NEXT: vreplvei.d $vr1, $vr1, 0 +; CHECK-NEXT: fmin.d $fa0, $fa1, $fa0 +; CHECK-NEXT: vextrins.d $vr0, $vr2, 16 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <2 x double>, ptr %x + %v1 = load <2 x double>, ptr %y + %r = call <2 x double> @llvm.minnum.v2f64(<2 x double> %v0, <2 x double> %v1) + store <2 x double> %r, ptr %res + ret void +} + +define void @maxnum_v4f32(ptr %res, ptr %x, ptr %y) nounwind { +; CHECK-LABEL: maxnum_v4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a2, 0 +; CHECK-NEXT: vld $vr1, $a1, 0 +; CHECK-NEXT: vreplvei.w $vr2, $vr0, 1 +; CHECK-NEXT: vreplvei.w $vr3, $vr1, 1 +; CHECK-NEXT: fmax.s $fa2, $fa3, $fa2 +; CHECK-NEXT: vreplvei.w $vr3, $vr0, 0 +; CHECK-NEXT: vreplvei.w $vr4, $vr1, 0 +; CHECK-NEXT: fmax.s $fa3, $fa4, $fa3 +; CHECK-NEXT: vextrins.w $vr3, $vr2, 16 +; CHECK-NEXT: vreplvei.w $vr2, $vr0, 2 +; CHECK-NEXT: vreplvei.w $vr4, $vr1, 2 +; CHECK-NEXT: fmax.s $fa2, $fa4, $fa2 +; CHECK-NEXT: vextrins.w $vr3, $vr2, 32 +; CHECK-NEXT: vreplvei.w $vr0, $vr0, 3 +; CHECK-NEXT: vreplvei.w $vr1, $vr1, 3 +; CHECK-NEXT: fmax.s $fa0, $fa1, $fa0 +; CHECK-NEXT: vextrins.w $vr3, $vr0, 48 +; CHECK-NEXT: vst $vr3, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <4 x float>, ptr %x + %v1 = load <4 x float>, ptr %y + %r = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %v0, <4 x float> %v1) + store <4 x float> %r, ptr %res + ret void +} + +define void @maxnum_v2f64(ptr %res, ptr %x, ptr %y) nounwind { +; CHECK-LABEL: maxnum_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a2, 0 +; CHECK-NEXT: vld $vr1, $a1, 0 +; CHECK-NEXT: vreplvei.d $vr2, $vr0, 1 +; CHECK-NEXT: vreplvei.d $vr3, $vr1, 1 +; CHECK-NEXT: fmax.d $fa2, $fa3, $fa2 +; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0 +; CHECK-NEXT: vreplvei.d $vr1, $vr1, 0 +; CHECK-NEXT: fmax.d $fa0, $fa1, $fa0 +; CHECK-NEXT: vextrins.d $vr0, $vr2, 16 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <2 x double>, ptr %x + %v1 = load <2 x double>, ptr %y + %r = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %v0, <2 x double> %v1) + store <2 x double> %r, ptr %res + ret void +} + +declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>) +declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>) +declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>) +declare <2 x double> @llvm.maxnum.v2f64(<2 x 
double>, <2 x double>) diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll b/llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll new file mode 100644 index 0000000..b43555c6 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll @@ -0,0 +1,642 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+b,+zalrsc -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefixes=RV32IB-COMMON,RV32IB-ZALRSC %s +; RUN: llc -mtriple=riscv32 -mattr=+b,+zalrsc,+permissive-zalrsc -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefixes=RV32IB-COMMON,RV32IB-ZALRSC-PERM %s +; RUN: llc -mtriple=riscv32 -mattr=+b,+zalrsc,+permissive-zalrsc,+a -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefixes=RV32IB-COMMON,RV32IAB %s +; +; RUN: llc -mtriple=riscv64 -mattr=+b,+zalrsc -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefixes=RV64IB-ZALRSC %s +; RUN: llc -mtriple=riscv64 -mattr=+b,+zalrsc,+permissive-zalrsc -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefixes=RV64IB-ZALRSC-PERM %s +; RUN: llc -mtriple=riscv64 -mattr=+b,+zalrsc,+permissive-zalrsc,+a -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefixes=RV64IAB %s + +define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind { +; RV32IB-ZALRSC-LABEL: atomicrmw_max_i32_seq_cst: +; RV32IB-ZALRSC: # %bb.0: +; RV32IB-ZALRSC-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1 +; RV32IB-ZALRSC-NEXT: lr.w.aqrl a2, (a0) +; RV32IB-ZALRSC-NEXT: mv a3, a2 +; RV32IB-ZALRSC-NEXT: bge a3, a1, .LBB0_3 +; RV32IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB0_1 Depth=1 +; RV32IB-ZALRSC-NEXT: mv a3, a1 +; RV32IB-ZALRSC-NEXT: .LBB0_3: # in Loop: Header=BB0_1 Depth=1 +; RV32IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0) +; RV32IB-ZALRSC-NEXT: bnez a3, .LBB0_1 +; RV32IB-ZALRSC-NEXT: # %bb.4: +; RV32IB-ZALRSC-NEXT: mv a0, a2 +; RV32IB-ZALRSC-NEXT: ret +; +; RV32IB-ZALRSC-PERM-LABEL: atomicrmw_max_i32_seq_cst: +; RV32IB-ZALRSC-PERM: # %bb.0: +; RV32IB-ZALRSC-PERM-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1 +; RV32IB-ZALRSC-PERM-NEXT: lr.w.aqrl a2, (a0) +; RV32IB-ZALRSC-PERM-NEXT: max a3, a2, a1 +; RV32IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0) +; RV32IB-ZALRSC-PERM-NEXT: bnez a3, .LBB0_1 +; RV32IB-ZALRSC-PERM-NEXT: # %bb.2: +; RV32IB-ZALRSC-PERM-NEXT: mv a0, a2 +; RV32IB-ZALRSC-PERM-NEXT: ret +; +; RV32IAB-LABEL: atomicrmw_max_i32_seq_cst: +; RV32IAB: # %bb.0: +; RV32IAB-NEXT: amomax.w.aqrl a0, a1, (a0) +; RV32IAB-NEXT: ret +; +; RV64IB-ZALRSC-LABEL: atomicrmw_max_i32_seq_cst: +; RV64IB-ZALRSC: # %bb.0: +; RV64IB-ZALRSC-NEXT: sext.w a2, a1 +; RV64IB-ZALRSC-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-NEXT: lr.w.aqrl a1, (a0) +; RV64IB-ZALRSC-NEXT: mv a3, a1 +; RV64IB-ZALRSC-NEXT: bge a3, a2, .LBB0_3 +; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB0_1 Depth=1 +; RV64IB-ZALRSC-NEXT: mv a3, a2 +; RV64IB-ZALRSC-NEXT: .LBB0_3: # in Loop: Header=BB0_1 Depth=1 +; RV64IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0) +; RV64IB-ZALRSC-NEXT: bnez a3, .LBB0_1 +; RV64IB-ZALRSC-NEXT: # %bb.4: +; RV64IB-ZALRSC-NEXT: mv a0, a1 +; RV64IB-ZALRSC-NEXT: ret +; +; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_max_i32_seq_cst: +; RV64IB-ZALRSC-PERM: # %bb.0: +; RV64IB-ZALRSC-PERM-NEXT: sext.w a2, a1 +; RV64IB-ZALRSC-PERM-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-PERM-NEXT: lr.w.aqrl a1, (a0) +; RV64IB-ZALRSC-PERM-NEXT: max a3, a1, a2 +; RV64IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0) +; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB0_1 +; RV64IB-ZALRSC-PERM-NEXT: # 
%bb.2: +; RV64IB-ZALRSC-PERM-NEXT: mv a0, a1 +; RV64IB-ZALRSC-PERM-NEXT: ret +; +; RV64IAB-LABEL: atomicrmw_max_i32_seq_cst: +; RV64IAB: # %bb.0: +; RV64IAB-NEXT: amomax.w.aqrl a0, a1, (a0) +; RV64IAB-NEXT: ret + %1 = atomicrmw max ptr %a, i32 %b seq_cst + ret i32 %1 +} + +define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind { +; RV32IB-ZALRSC-LABEL: atomicrmw_min_i32_seq_cst: +; RV32IB-ZALRSC: # %bb.0: +; RV32IB-ZALRSC-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1 +; RV32IB-ZALRSC-NEXT: lr.w.aqrl a2, (a0) +; RV32IB-ZALRSC-NEXT: mv a3, a2 +; RV32IB-ZALRSC-NEXT: bge a1, a3, .LBB1_3 +; RV32IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB1_1 Depth=1 +; RV32IB-ZALRSC-NEXT: mv a3, a1 +; RV32IB-ZALRSC-NEXT: .LBB1_3: # in Loop: Header=BB1_1 Depth=1 +; RV32IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0) +; RV32IB-ZALRSC-NEXT: bnez a3, .LBB1_1 +; RV32IB-ZALRSC-NEXT: # %bb.4: +; RV32IB-ZALRSC-NEXT: mv a0, a2 +; RV32IB-ZALRSC-NEXT: ret +; +; RV32IB-ZALRSC-PERM-LABEL: atomicrmw_min_i32_seq_cst: +; RV32IB-ZALRSC-PERM: # %bb.0: +; RV32IB-ZALRSC-PERM-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1 +; RV32IB-ZALRSC-PERM-NEXT: lr.w.aqrl a2, (a0) +; RV32IB-ZALRSC-PERM-NEXT: min a3, a2, a1 +; RV32IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0) +; RV32IB-ZALRSC-PERM-NEXT: bnez a3, .LBB1_1 +; RV32IB-ZALRSC-PERM-NEXT: # %bb.2: +; RV32IB-ZALRSC-PERM-NEXT: mv a0, a2 +; RV32IB-ZALRSC-PERM-NEXT: ret +; +; RV32IAB-LABEL: atomicrmw_min_i32_seq_cst: +; RV32IAB: # %bb.0: +; RV32IAB-NEXT: amomin.w.aqrl a0, a1, (a0) +; RV32IAB-NEXT: ret +; +; RV64IB-ZALRSC-LABEL: atomicrmw_min_i32_seq_cst: +; RV64IB-ZALRSC: # %bb.0: +; RV64IB-ZALRSC-NEXT: sext.w a2, a1 +; RV64IB-ZALRSC-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-NEXT: lr.w.aqrl a1, (a0) +; RV64IB-ZALRSC-NEXT: mv a3, a1 +; RV64IB-ZALRSC-NEXT: bge a2, a3, .LBB1_3 +; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB1_1 Depth=1 +; RV64IB-ZALRSC-NEXT: mv a3, a2 +; RV64IB-ZALRSC-NEXT: .LBB1_3: # in Loop: Header=BB1_1 Depth=1 +; RV64IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0) +; RV64IB-ZALRSC-NEXT: bnez a3, .LBB1_1 +; RV64IB-ZALRSC-NEXT: # %bb.4: +; RV64IB-ZALRSC-NEXT: mv a0, a1 +; RV64IB-ZALRSC-NEXT: ret +; +; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_min_i32_seq_cst: +; RV64IB-ZALRSC-PERM: # %bb.0: +; RV64IB-ZALRSC-PERM-NEXT: sext.w a2, a1 +; RV64IB-ZALRSC-PERM-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-PERM-NEXT: lr.w.aqrl a1, (a0) +; RV64IB-ZALRSC-PERM-NEXT: min a3, a1, a2 +; RV64IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0) +; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB1_1 +; RV64IB-ZALRSC-PERM-NEXT: # %bb.2: +; RV64IB-ZALRSC-PERM-NEXT: mv a0, a1 +; RV64IB-ZALRSC-PERM-NEXT: ret +; +; RV64IAB-LABEL: atomicrmw_min_i32_seq_cst: +; RV64IAB: # %bb.0: +; RV64IAB-NEXT: amomin.w.aqrl a0, a1, (a0) +; RV64IAB-NEXT: ret + %1 = atomicrmw min ptr %a, i32 %b seq_cst + ret i32 %1 +} + +define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind { +; RV32IB-ZALRSC-LABEL: atomicrmw_umax_i32_seq_cst: +; RV32IB-ZALRSC: # %bb.0: +; RV32IB-ZALRSC-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1 +; RV32IB-ZALRSC-NEXT: lr.w.aqrl a2, (a0) +; RV32IB-ZALRSC-NEXT: mv a3, a2 +; RV32IB-ZALRSC-NEXT: bgeu a3, a1, .LBB2_3 +; RV32IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB2_1 Depth=1 +; RV32IB-ZALRSC-NEXT: mv a3, a1 +; RV32IB-ZALRSC-NEXT: .LBB2_3: # in Loop: Header=BB2_1 Depth=1 +; RV32IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0) +; RV32IB-ZALRSC-NEXT: bnez a3, .LBB2_1 +; RV32IB-ZALRSC-NEXT: # %bb.4: +; RV32IB-ZALRSC-NEXT: mv a0, a2 +; RV32IB-ZALRSC-NEXT: ret +; +; 
RV32IB-ZALRSC-PERM-LABEL: atomicrmw_umax_i32_seq_cst: +; RV32IB-ZALRSC-PERM: # %bb.0: +; RV32IB-ZALRSC-PERM-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1 +; RV32IB-ZALRSC-PERM-NEXT: lr.w.aqrl a2, (a0) +; RV32IB-ZALRSC-PERM-NEXT: maxu a3, a2, a1 +; RV32IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0) +; RV32IB-ZALRSC-PERM-NEXT: bnez a3, .LBB2_1 +; RV32IB-ZALRSC-PERM-NEXT: # %bb.2: +; RV32IB-ZALRSC-PERM-NEXT: mv a0, a2 +; RV32IB-ZALRSC-PERM-NEXT: ret +; +; RV32IAB-LABEL: atomicrmw_umax_i32_seq_cst: +; RV32IAB: # %bb.0: +; RV32IAB-NEXT: amomaxu.w.aqrl a0, a1, (a0) +; RV32IAB-NEXT: ret +; +; RV64IB-ZALRSC-LABEL: atomicrmw_umax_i32_seq_cst: +; RV64IB-ZALRSC: # %bb.0: +; RV64IB-ZALRSC-NEXT: sext.w a2, a1 +; RV64IB-ZALRSC-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-NEXT: lr.w.aqrl a1, (a0) +; RV64IB-ZALRSC-NEXT: mv a3, a1 +; RV64IB-ZALRSC-NEXT: bgeu a3, a2, .LBB2_3 +; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB2_1 Depth=1 +; RV64IB-ZALRSC-NEXT: mv a3, a2 +; RV64IB-ZALRSC-NEXT: .LBB2_3: # in Loop: Header=BB2_1 Depth=1 +; RV64IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0) +; RV64IB-ZALRSC-NEXT: bnez a3, .LBB2_1 +; RV64IB-ZALRSC-NEXT: # %bb.4: +; RV64IB-ZALRSC-NEXT: mv a0, a1 +; RV64IB-ZALRSC-NEXT: ret +; +; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_umax_i32_seq_cst: +; RV64IB-ZALRSC-PERM: # %bb.0: +; RV64IB-ZALRSC-PERM-NEXT: sext.w a2, a1 +; RV64IB-ZALRSC-PERM-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-PERM-NEXT: lr.w.aqrl a1, (a0) +; RV64IB-ZALRSC-PERM-NEXT: maxu a3, a1, a2 +; RV64IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0) +; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB2_1 +; RV64IB-ZALRSC-PERM-NEXT: # %bb.2: +; RV64IB-ZALRSC-PERM-NEXT: mv a0, a1 +; RV64IB-ZALRSC-PERM-NEXT: ret +; +; RV64IAB-LABEL: atomicrmw_umax_i32_seq_cst: +; RV64IAB: # %bb.0: +; RV64IAB-NEXT: amomaxu.w.aqrl a0, a1, (a0) +; RV64IAB-NEXT: ret + %1 = atomicrmw umax ptr %a, i32 %b seq_cst + ret i32 %1 +} + +define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind { +; RV32IB-ZALRSC-LABEL: atomicrmw_umin_i32_seq_cst: +; RV32IB-ZALRSC: # %bb.0: +; RV32IB-ZALRSC-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 +; RV32IB-ZALRSC-NEXT: lr.w.aqrl a2, (a0) +; RV32IB-ZALRSC-NEXT: mv a3, a2 +; RV32IB-ZALRSC-NEXT: bgeu a1, a3, .LBB3_3 +; RV32IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB3_1 Depth=1 +; RV32IB-ZALRSC-NEXT: mv a3, a1 +; RV32IB-ZALRSC-NEXT: .LBB3_3: # in Loop: Header=BB3_1 Depth=1 +; RV32IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0) +; RV32IB-ZALRSC-NEXT: bnez a3, .LBB3_1 +; RV32IB-ZALRSC-NEXT: # %bb.4: +; RV32IB-ZALRSC-NEXT: mv a0, a2 +; RV32IB-ZALRSC-NEXT: ret +; +; RV32IB-ZALRSC-PERM-LABEL: atomicrmw_umin_i32_seq_cst: +; RV32IB-ZALRSC-PERM: # %bb.0: +; RV32IB-ZALRSC-PERM-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 +; RV32IB-ZALRSC-PERM-NEXT: lr.w.aqrl a2, (a0) +; RV32IB-ZALRSC-PERM-NEXT: minu a3, a2, a1 +; RV32IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0) +; RV32IB-ZALRSC-PERM-NEXT: bnez a3, .LBB3_1 +; RV32IB-ZALRSC-PERM-NEXT: # %bb.2: +; RV32IB-ZALRSC-PERM-NEXT: mv a0, a2 +; RV32IB-ZALRSC-PERM-NEXT: ret +; +; RV32IAB-LABEL: atomicrmw_umin_i32_seq_cst: +; RV32IAB: # %bb.0: +; RV32IAB-NEXT: amominu.w.aqrl a0, a1, (a0) +; RV32IAB-NEXT: ret +; +; RV64IB-ZALRSC-LABEL: atomicrmw_umin_i32_seq_cst: +; RV64IB-ZALRSC: # %bb.0: +; RV64IB-ZALRSC-NEXT: sext.w a2, a1 +; RV64IB-ZALRSC-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-NEXT: lr.w.aqrl a1, (a0) +; RV64IB-ZALRSC-NEXT: mv a3, a1 +; RV64IB-ZALRSC-NEXT: bgeu a2, a3, .LBB3_3 +; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB3_1 
Depth=1 +; RV64IB-ZALRSC-NEXT: mv a3, a2 +; RV64IB-ZALRSC-NEXT: .LBB3_3: # in Loop: Header=BB3_1 Depth=1 +; RV64IB-ZALRSC-NEXT: sc.w.rl a3, a3, (a0) +; RV64IB-ZALRSC-NEXT: bnez a3, .LBB3_1 +; RV64IB-ZALRSC-NEXT: # %bb.4: +; RV64IB-ZALRSC-NEXT: mv a0, a1 +; RV64IB-ZALRSC-NEXT: ret +; +; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_umin_i32_seq_cst: +; RV64IB-ZALRSC-PERM: # %bb.0: +; RV64IB-ZALRSC-PERM-NEXT: sext.w a2, a1 +; RV64IB-ZALRSC-PERM-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-PERM-NEXT: lr.w.aqrl a1, (a0) +; RV64IB-ZALRSC-PERM-NEXT: minu a3, a1, a2 +; RV64IB-ZALRSC-PERM-NEXT: sc.w.rl a3, a3, (a0) +; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB3_1 +; RV64IB-ZALRSC-PERM-NEXT: # %bb.2: +; RV64IB-ZALRSC-PERM-NEXT: mv a0, a1 +; RV64IB-ZALRSC-PERM-NEXT: ret +; +; RV64IAB-LABEL: atomicrmw_umin_i32_seq_cst: +; RV64IAB: # %bb.0: +; RV64IAB-NEXT: amominu.w.aqrl a0, a1, (a0) +; RV64IAB-NEXT: ret + %1 = atomicrmw umin ptr %a, i32 %b seq_cst + ret i32 %1 +} + +define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind { +; RV32IB-COMMON-LABEL: atomicrmw_max_i64_seq_cst: +; RV32IB-COMMON: # %bb.0: +; RV32IB-COMMON-NEXT: addi sp, sp, -32 +; RV32IB-COMMON-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: mv s0, a2 +; RV32IB-COMMON-NEXT: mv s1, a0 +; RV32IB-COMMON-NEXT: lw a4, 0(a0) +; RV32IB-COMMON-NEXT: lw a5, 4(a0) +; RV32IB-COMMON-NEXT: mv s2, a1 +; RV32IB-COMMON-NEXT: j .LBB4_2 +; RV32IB-COMMON-NEXT: .LBB4_1: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB4_2 Depth=1 +; RV32IB-COMMON-NEXT: sw a4, 8(sp) +; RV32IB-COMMON-NEXT: sw a5, 12(sp) +; RV32IB-COMMON-NEXT: addi a1, sp, 8 +; RV32IB-COMMON-NEXT: li a4, 5 +; RV32IB-COMMON-NEXT: li a5, 5 +; RV32IB-COMMON-NEXT: mv a0, s1 +; RV32IB-COMMON-NEXT: call __atomic_compare_exchange_8 +; RV32IB-COMMON-NEXT: lw a4, 8(sp) +; RV32IB-COMMON-NEXT: lw a5, 12(sp) +; RV32IB-COMMON-NEXT: bnez a0, .LBB4_7 +; RV32IB-COMMON-NEXT: .LBB4_2: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # =>This Inner Loop Header: Depth=1 +; RV32IB-COMMON-NEXT: beq a5, s0, .LBB4_4 +; RV32IB-COMMON-NEXT: # %bb.3: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB4_2 Depth=1 +; RV32IB-COMMON-NEXT: slt a0, s0, a5 +; RV32IB-COMMON-NEXT: j .LBB4_5 +; RV32IB-COMMON-NEXT: .LBB4_4: # in Loop: Header=BB4_2 Depth=1 +; RV32IB-COMMON-NEXT: sltu a0, s2, a4 +; RV32IB-COMMON-NEXT: .LBB4_5: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB4_2 Depth=1 +; RV32IB-COMMON-NEXT: mv a2, a4 +; RV32IB-COMMON-NEXT: mv a3, a5 +; RV32IB-COMMON-NEXT: bnez a0, .LBB4_1 +; RV32IB-COMMON-NEXT: # %bb.6: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB4_2 Depth=1 +; RV32IB-COMMON-NEXT: mv a2, s2 +; RV32IB-COMMON-NEXT: mv a3, s0 +; RV32IB-COMMON-NEXT: j .LBB4_1 +; RV32IB-COMMON-NEXT: .LBB4_7: # %atomicrmw.end +; RV32IB-COMMON-NEXT: mv a0, a4 +; RV32IB-COMMON-NEXT: mv a1, a5 +; RV32IB-COMMON-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: addi sp, sp, 32 +; RV32IB-COMMON-NEXT: ret +; +; RV64IB-ZALRSC-LABEL: atomicrmw_max_i64_seq_cst: +; RV64IB-ZALRSC: # %bb.0: +; RV64IB-ZALRSC-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-NEXT: lr.d.aqrl a2, (a0) +; 
RV64IB-ZALRSC-NEXT: mv a3, a2 +; RV64IB-ZALRSC-NEXT: bge a3, a1, .LBB4_3 +; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB4_1 Depth=1 +; RV64IB-ZALRSC-NEXT: mv a3, a1 +; RV64IB-ZALRSC-NEXT: .LBB4_3: # in Loop: Header=BB4_1 Depth=1 +; RV64IB-ZALRSC-NEXT: sc.d.rl a3, a3, (a0) +; RV64IB-ZALRSC-NEXT: bnez a3, .LBB4_1 +; RV64IB-ZALRSC-NEXT: # %bb.4: +; RV64IB-ZALRSC-NEXT: mv a0, a2 +; RV64IB-ZALRSC-NEXT: ret +; +; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_max_i64_seq_cst: +; RV64IB-ZALRSC-PERM: # %bb.0: +; RV64IB-ZALRSC-PERM-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-PERM-NEXT: lr.d.aqrl a2, (a0) +; RV64IB-ZALRSC-PERM-NEXT: max a3, a2, a1 +; RV64IB-ZALRSC-PERM-NEXT: sc.d.rl a3, a3, (a0) +; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB4_1 +; RV64IB-ZALRSC-PERM-NEXT: # %bb.2: +; RV64IB-ZALRSC-PERM-NEXT: mv a0, a2 +; RV64IB-ZALRSC-PERM-NEXT: ret +; +; RV64IAB-LABEL: atomicrmw_max_i64_seq_cst: +; RV64IAB: # %bb.0: +; RV64IAB-NEXT: amomax.d.aqrl a0, a1, (a0) +; RV64IAB-NEXT: ret + %1 = atomicrmw max ptr %a, i64 %b seq_cst + ret i64 %1 +} + +define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind { +; RV32IB-COMMON-LABEL: atomicrmw_min_i64_seq_cst: +; RV32IB-COMMON: # %bb.0: +; RV32IB-COMMON-NEXT: addi sp, sp, -32 +; RV32IB-COMMON-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: mv s0, a2 +; RV32IB-COMMON-NEXT: mv s1, a0 +; RV32IB-COMMON-NEXT: lw a4, 0(a0) +; RV32IB-COMMON-NEXT: lw a5, 4(a0) +; RV32IB-COMMON-NEXT: mv s2, a1 +; RV32IB-COMMON-NEXT: j .LBB5_2 +; RV32IB-COMMON-NEXT: .LBB5_1: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB5_2 Depth=1 +; RV32IB-COMMON-NEXT: sw a4, 8(sp) +; RV32IB-COMMON-NEXT: sw a5, 12(sp) +; RV32IB-COMMON-NEXT: addi a1, sp, 8 +; RV32IB-COMMON-NEXT: li a4, 5 +; RV32IB-COMMON-NEXT: li a5, 5 +; RV32IB-COMMON-NEXT: mv a0, s1 +; RV32IB-COMMON-NEXT: call __atomic_compare_exchange_8 +; RV32IB-COMMON-NEXT: lw a4, 8(sp) +; RV32IB-COMMON-NEXT: lw a5, 12(sp) +; RV32IB-COMMON-NEXT: bnez a0, .LBB5_7 +; RV32IB-COMMON-NEXT: .LBB5_2: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # =>This Inner Loop Header: Depth=1 +; RV32IB-COMMON-NEXT: beq a5, s0, .LBB5_4 +; RV32IB-COMMON-NEXT: # %bb.3: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB5_2 Depth=1 +; RV32IB-COMMON-NEXT: slt a0, a5, s0 +; RV32IB-COMMON-NEXT: j .LBB5_5 +; RV32IB-COMMON-NEXT: .LBB5_4: # in Loop: Header=BB5_2 Depth=1 +; RV32IB-COMMON-NEXT: sltu a0, a4, s2 +; RV32IB-COMMON-NEXT: .LBB5_5: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB5_2 Depth=1 +; RV32IB-COMMON-NEXT: mv a2, a4 +; RV32IB-COMMON-NEXT: mv a3, a5 +; RV32IB-COMMON-NEXT: bnez a0, .LBB5_1 +; RV32IB-COMMON-NEXT: # %bb.6: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB5_2 Depth=1 +; RV32IB-COMMON-NEXT: mv a2, s2 +; RV32IB-COMMON-NEXT: mv a3, s0 +; RV32IB-COMMON-NEXT: j .LBB5_1 +; RV32IB-COMMON-NEXT: .LBB5_7: # %atomicrmw.end +; RV32IB-COMMON-NEXT: mv a0, a4 +; RV32IB-COMMON-NEXT: mv a1, a5 +; RV32IB-COMMON-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: addi sp, sp, 32 +; RV32IB-COMMON-NEXT: ret +; +; RV64IB-ZALRSC-LABEL: atomicrmw_min_i64_seq_cst: +; RV64IB-ZALRSC: # %bb.0: +; RV64IB-ZALRSC-NEXT: .LBB5_1: # 
=>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-NEXT: lr.d.aqrl a2, (a0) +; RV64IB-ZALRSC-NEXT: mv a3, a2 +; RV64IB-ZALRSC-NEXT: bge a1, a3, .LBB5_3 +; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 +; RV64IB-ZALRSC-NEXT: mv a3, a1 +; RV64IB-ZALRSC-NEXT: .LBB5_3: # in Loop: Header=BB5_1 Depth=1 +; RV64IB-ZALRSC-NEXT: sc.d.rl a3, a3, (a0) +; RV64IB-ZALRSC-NEXT: bnez a3, .LBB5_1 +; RV64IB-ZALRSC-NEXT: # %bb.4: +; RV64IB-ZALRSC-NEXT: mv a0, a2 +; RV64IB-ZALRSC-NEXT: ret +; +; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_min_i64_seq_cst: +; RV64IB-ZALRSC-PERM: # %bb.0: +; RV64IB-ZALRSC-PERM-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-PERM-NEXT: lr.d.aqrl a2, (a0) +; RV64IB-ZALRSC-PERM-NEXT: min a3, a2, a1 +; RV64IB-ZALRSC-PERM-NEXT: sc.d.rl a3, a3, (a0) +; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB5_1 +; RV64IB-ZALRSC-PERM-NEXT: # %bb.2: +; RV64IB-ZALRSC-PERM-NEXT: mv a0, a2 +; RV64IB-ZALRSC-PERM-NEXT: ret +; +; RV64IAB-LABEL: atomicrmw_min_i64_seq_cst: +; RV64IAB: # %bb.0: +; RV64IAB-NEXT: amomin.d.aqrl a0, a1, (a0) +; RV64IAB-NEXT: ret + %1 = atomicrmw min ptr %a, i64 %b seq_cst + ret i64 %1 +} + +define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind { +; RV32IB-COMMON-LABEL: atomicrmw_umax_i64_seq_cst: +; RV32IB-COMMON: # %bb.0: +; RV32IB-COMMON-NEXT: addi sp, sp, -32 +; RV32IB-COMMON-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: mv s0, a2 +; RV32IB-COMMON-NEXT: mv s1, a0 +; RV32IB-COMMON-NEXT: lw a4, 0(a0) +; RV32IB-COMMON-NEXT: lw a5, 4(a0) +; RV32IB-COMMON-NEXT: mv s2, a1 +; RV32IB-COMMON-NEXT: j .LBB6_2 +; RV32IB-COMMON-NEXT: .LBB6_1: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB6_2 Depth=1 +; RV32IB-COMMON-NEXT: sw a4, 8(sp) +; RV32IB-COMMON-NEXT: sw a5, 12(sp) +; RV32IB-COMMON-NEXT: addi a1, sp, 8 +; RV32IB-COMMON-NEXT: li a4, 5 +; RV32IB-COMMON-NEXT: li a5, 5 +; RV32IB-COMMON-NEXT: mv a0, s1 +; RV32IB-COMMON-NEXT: call __atomic_compare_exchange_8 +; RV32IB-COMMON-NEXT: lw a4, 8(sp) +; RV32IB-COMMON-NEXT: lw a5, 12(sp) +; RV32IB-COMMON-NEXT: bnez a0, .LBB6_7 +; RV32IB-COMMON-NEXT: .LBB6_2: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # =>This Inner Loop Header: Depth=1 +; RV32IB-COMMON-NEXT: beq a5, s0, .LBB6_4 +; RV32IB-COMMON-NEXT: # %bb.3: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB6_2 Depth=1 +; RV32IB-COMMON-NEXT: sltu a0, s0, a5 +; RV32IB-COMMON-NEXT: j .LBB6_5 +; RV32IB-COMMON-NEXT: .LBB6_4: # in Loop: Header=BB6_2 Depth=1 +; RV32IB-COMMON-NEXT: sltu a0, s2, a4 +; RV32IB-COMMON-NEXT: .LBB6_5: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB6_2 Depth=1 +; RV32IB-COMMON-NEXT: mv a2, a4 +; RV32IB-COMMON-NEXT: mv a3, a5 +; RV32IB-COMMON-NEXT: bnez a0, .LBB6_1 +; RV32IB-COMMON-NEXT: # %bb.6: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB6_2 Depth=1 +; RV32IB-COMMON-NEXT: mv a2, s2 +; RV32IB-COMMON-NEXT: mv a3, s0 +; RV32IB-COMMON-NEXT: j .LBB6_1 +; RV32IB-COMMON-NEXT: .LBB6_7: # %atomicrmw.end +; RV32IB-COMMON-NEXT: mv a0, a4 +; RV32IB-COMMON-NEXT: mv a1, a5 +; RV32IB-COMMON-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: addi sp, sp, 32 +; RV32IB-COMMON-NEXT: ret +; +; RV64IB-ZALRSC-LABEL: 
atomicrmw_umax_i64_seq_cst: +; RV64IB-ZALRSC: # %bb.0: +; RV64IB-ZALRSC-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-NEXT: lr.d.aqrl a2, (a0) +; RV64IB-ZALRSC-NEXT: mv a3, a2 +; RV64IB-ZALRSC-NEXT: bgeu a3, a1, .LBB6_3 +; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 +; RV64IB-ZALRSC-NEXT: mv a3, a1 +; RV64IB-ZALRSC-NEXT: .LBB6_3: # in Loop: Header=BB6_1 Depth=1 +; RV64IB-ZALRSC-NEXT: sc.d.rl a3, a3, (a0) +; RV64IB-ZALRSC-NEXT: bnez a3, .LBB6_1 +; RV64IB-ZALRSC-NEXT: # %bb.4: +; RV64IB-ZALRSC-NEXT: mv a0, a2 +; RV64IB-ZALRSC-NEXT: ret +; +; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_umax_i64_seq_cst: +; RV64IB-ZALRSC-PERM: # %bb.0: +; RV64IB-ZALRSC-PERM-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-PERM-NEXT: lr.d.aqrl a2, (a0) +; RV64IB-ZALRSC-PERM-NEXT: maxu a3, a2, a1 +; RV64IB-ZALRSC-PERM-NEXT: sc.d.rl a3, a3, (a0) +; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB6_1 +; RV64IB-ZALRSC-PERM-NEXT: # %bb.2: +; RV64IB-ZALRSC-PERM-NEXT: mv a0, a2 +; RV64IB-ZALRSC-PERM-NEXT: ret +; +; RV64IAB-LABEL: atomicrmw_umax_i64_seq_cst: +; RV64IAB: # %bb.0: +; RV64IAB-NEXT: amomaxu.d.aqrl a0, a1, (a0) +; RV64IAB-NEXT: ret + %1 = atomicrmw umax ptr %a, i64 %b seq_cst + ret i64 %1 +} + +define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind { +; RV32IB-COMMON-LABEL: atomicrmw_umin_i64_seq_cst: +; RV32IB-COMMON: # %bb.0: +; RV32IB-COMMON-NEXT: addi sp, sp, -32 +; RV32IB-COMMON-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32IB-COMMON-NEXT: mv s0, a2 +; RV32IB-COMMON-NEXT: mv s1, a0 +; RV32IB-COMMON-NEXT: lw a4, 0(a0) +; RV32IB-COMMON-NEXT: lw a5, 4(a0) +; RV32IB-COMMON-NEXT: mv s2, a1 +; RV32IB-COMMON-NEXT: j .LBB7_2 +; RV32IB-COMMON-NEXT: .LBB7_1: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB7_2 Depth=1 +; RV32IB-COMMON-NEXT: sw a4, 8(sp) +; RV32IB-COMMON-NEXT: sw a5, 12(sp) +; RV32IB-COMMON-NEXT: addi a1, sp, 8 +; RV32IB-COMMON-NEXT: li a4, 5 +; RV32IB-COMMON-NEXT: li a5, 5 +; RV32IB-COMMON-NEXT: mv a0, s1 +; RV32IB-COMMON-NEXT: call __atomic_compare_exchange_8 +; RV32IB-COMMON-NEXT: lw a4, 8(sp) +; RV32IB-COMMON-NEXT: lw a5, 12(sp) +; RV32IB-COMMON-NEXT: bnez a0, .LBB7_7 +; RV32IB-COMMON-NEXT: .LBB7_2: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # =>This Inner Loop Header: Depth=1 +; RV32IB-COMMON-NEXT: beq a5, s0, .LBB7_4 +; RV32IB-COMMON-NEXT: # %bb.3: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB7_2 Depth=1 +; RV32IB-COMMON-NEXT: sltu a0, a5, s0 +; RV32IB-COMMON-NEXT: j .LBB7_5 +; RV32IB-COMMON-NEXT: .LBB7_4: # in Loop: Header=BB7_2 Depth=1 +; RV32IB-COMMON-NEXT: sltu a0, a4, s2 +; RV32IB-COMMON-NEXT: .LBB7_5: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB7_2 Depth=1 +; RV32IB-COMMON-NEXT: mv a2, a4 +; RV32IB-COMMON-NEXT: mv a3, a5 +; RV32IB-COMMON-NEXT: bnez a0, .LBB7_1 +; RV32IB-COMMON-NEXT: # %bb.6: # %atomicrmw.start +; RV32IB-COMMON-NEXT: # in Loop: Header=BB7_2 Depth=1 +; RV32IB-COMMON-NEXT: mv a2, s2 +; RV32IB-COMMON-NEXT: mv a3, s0 +; RV32IB-COMMON-NEXT: j .LBB7_1 +; RV32IB-COMMON-NEXT: .LBB7_7: # %atomicrmw.end +; RV32IB-COMMON-NEXT: mv a0, a4 +; RV32IB-COMMON-NEXT: mv a1, a5 +; RV32IB-COMMON-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IB-COMMON-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; 
RV32IB-COMMON-NEXT: addi sp, sp, 32 +; RV32IB-COMMON-NEXT: ret +; +; RV64IB-ZALRSC-LABEL: atomicrmw_umin_i64_seq_cst: +; RV64IB-ZALRSC: # %bb.0: +; RV64IB-ZALRSC-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-NEXT: lr.d.aqrl a2, (a0) +; RV64IB-ZALRSC-NEXT: mv a3, a2 +; RV64IB-ZALRSC-NEXT: bgeu a1, a3, .LBB7_3 +; RV64IB-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 +; RV64IB-ZALRSC-NEXT: mv a3, a1 +; RV64IB-ZALRSC-NEXT: .LBB7_3: # in Loop: Header=BB7_1 Depth=1 +; RV64IB-ZALRSC-NEXT: sc.d.rl a3, a3, (a0) +; RV64IB-ZALRSC-NEXT: bnez a3, .LBB7_1 +; RV64IB-ZALRSC-NEXT: # %bb.4: +; RV64IB-ZALRSC-NEXT: mv a0, a2 +; RV64IB-ZALRSC-NEXT: ret +; +; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_umin_i64_seq_cst: +; RV64IB-ZALRSC-PERM: # %bb.0: +; RV64IB-ZALRSC-PERM-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 +; RV64IB-ZALRSC-PERM-NEXT: lr.d.aqrl a2, (a0) +; RV64IB-ZALRSC-PERM-NEXT: minu a3, a2, a1 +; RV64IB-ZALRSC-PERM-NEXT: sc.d.rl a3, a3, (a0) +; RV64IB-ZALRSC-PERM-NEXT: bnez a3, .LBB7_1 +; RV64IB-ZALRSC-PERM-NEXT: # %bb.2: +; RV64IB-ZALRSC-PERM-NEXT: mv a0, a2 +; RV64IB-ZALRSC-PERM-NEXT: ret +; +; RV64IAB-LABEL: atomicrmw_umin_i64_seq_cst: +; RV64IAB: # %bb.0: +; RV64IAB-NEXT: amominu.d.aqrl a0, a1, (a0) +; RV64IAB-NEXT: ret + %1 = atomicrmw umin ptr %a, i64 %b seq_cst + ret i64 %1 +} diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll index 5e5f2b7..37e11db 100644 --- a/llvm/test/CodeGen/RISCV/features-info.ll +++ b/llvm/test/CodeGen/RISCV/features-info.ll @@ -81,6 +81,7 @@ ; CHECK-NEXT: optimized-nf7-segment-load-store - vlseg7eN.v and vsseg7eN.v are implemented as a wide memory op and shuffle. ; CHECK-NEXT: optimized-nf8-segment-load-store - vlseg8eN.v and vsseg8eN.v are implemented as a wide memory op and shuffle. ; CHECK-NEXT: optimized-zero-stride-load - Optimized (perform fewer memory operations)zero-stride vector load. +; CHECK-NEXT: permissive-zalrsc - Implementation permits non-base instructions between LR/SC pairs. ; CHECK-NEXT: predictable-select-expensive - Prefer likely predicted branches over selects. ; CHECK-NEXT: prefer-vsetvli-over-read-vlenb - Prefer vsetvli over read vlenb CSR to calculate VLEN. ; CHECK-NEXT: prefer-w-inst - Prefer instructions with W suffix. 
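The atomic-rmw-minmax.ll checks above illustrate what the new permissive-zalrsc feature buys. Roughly speaking, the ISA's constrained-LR/SC forward-progress guarantee covers only retry loops built from base-ISA instructions (hence the feature string's wording, "Implementation permits non-base instructions between LR/SC pairs"), so by default atomicrmw min/max lowers to a compare-and-branch sequence between lr.w and sc.w. With +permissive-zalrsc the implementation promises eventual store-conditional success even with non-base instructions in the loop, so codegen can place a single Zbb max/min/maxu/minu in the loop body, and with +a no loop is needed at all. A minimal sketch to reproduce the three lowerings, reusing the flags from the RUN lines above (the file and function names are illustrative):
    ; sketch.ll
    define i32 @atomicrmw_max_sketch(ptr %p, i32 %v) nounwind {
      %old = atomicrmw max ptr %p, i32 %v seq_cst
      ret i32 %old
    }
    ; llc -mtriple=riscv32 -mattr=+b,+zalrsc sketch.ll                        (branchy LR/SC loop)
    ; llc -mtriple=riscv32 -mattr=+b,+zalrsc,+permissive-zalrsc sketch.ll     (Zbb 'max' inside the loop)
    ; llc -mtriple=riscv32 -mattr=+b,+zalrsc,+permissive-zalrsc,+a sketch.ll  (single amomax.w.aqrl)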
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveActiveMin.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveActiveMin.ll new file mode 100644 index 0000000..d121c1a --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveActiveMin.ll @@ -0,0 +1,57 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-vulkan-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan-unknown %s -o - -filetype=obj | spirv-val %} + +; Test lowering to spir-v backend for various types and scalar/vector + +; CHECK: OpCapability GroupNonUniformArithmetic + +; CHECK-DAG: %[[#f16:]] = OpTypeFloat 16 +; CHECK-DAG: %[[#f32:]] = OpTypeFloat 32 +; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#v4_half:]] = OpTypeVector %[[#f16]] 4 +; CHECK-DAG: %[[#scope:]] = OpConstant %[[#uint]] 3 + +; CHECK-LABEL: Begin function test_float +; CHECK: %[[#fexpr:]] = OpFunctionParameter %[[#f32]] +define float @test_float(float %fexpr) { +entry: +; CHECK: %[[#fret:]] = OpGroupNonUniformFMin %[[#f32]] %[[#scope]] Reduce %[[#fexpr]] + %0 = call float @llvm.spv.wave.reduce.min.f32(float %fexpr) + ret float %0 +} + +; CHECK-LABEL: Begin function test_int_signed +; CHECK: %[[#iexpr:]] = OpFunctionParameter %[[#uint]] +define i32 @test_int_signed(i32 %iexpr) { +entry: +; CHECK: %[[#iret:]] = OpGroupNonUniformSMin %[[#uint]] %[[#scope]] Reduce %[[#iexpr]] + %0 = call i32 @llvm.spv.wave.reduce.min.i32(i32 %iexpr) + ret i32 %0 +} + +; CHECK-LABEL: Begin function test_int_unsigned +; CHECK: %[[#iexpr:]] = OpFunctionParameter %[[#uint]] +define i32 @test_int_unsigned(i32 %iexpr) { +entry: +; CHECK: %[[#iret:]] = OpGroupNonUniformUMin %[[#uint]] %[[#scope]] Reduce %[[#iexpr]] + %0 = call i32 @llvm.spv.wave.reduce.umin.i32(i32 %iexpr) + ret i32 %0 +} + +; CHECK-LABEL: Begin function test_vhalf +; CHECK: %[[#vbexpr:]] = OpFunctionParameter %[[#v4_half]] +define <4 x half> @test_vhalf(<4 x half> %vbexpr) { +entry: +; CHECK: %[[#vhalfret:]] = OpGroupNonUniformFMin %[[#v4_half]] %[[#scope]] Reduce %[[#vbexpr]] + %0 = call <4 x half> @llvm.spv.wave.reduce.min.v4half(<4 x half> %vbexpr) + ret <4 x half> %0 +} + +declare float @llvm.spv.wave.reduce.min.f32(float) +declare i32 @llvm.spv.wave.reduce.min.i32(i32) +declare <4 x half> @llvm.spv.wave.reduce.min.v4half(<4 x half>) + +declare float @llvm.spv.wave.reduce.umin.f32(float) +declare i32 @llvm.spv.wave.reduce.umin.i32(i32) +declare <4 x half> @llvm.spv.wave.reduce.umin.v4half(<4 x half>) + diff --git a/llvm/test/MC/AMDGPU/buffer-op-swz-operand.s b/llvm/test/MC/AMDGPU/buffer-op-swz-operand.s index 8bd9148..4542027 100644 --- a/llvm/test/MC/AMDGPU/buffer-op-swz-operand.s +++ b/llvm/test/MC/AMDGPU/buffer-op-swz-operand.s @@ -2,7 +2,7 @@ // CHECK: .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" buffer_load_dwordx4 v[0:3], v0, s[0:3], 0, offen offset:4092 slc -// CHECK: buffer_load_b128 v[0:3], v0, s[0:3], 0 offen offset:4092 slc ; <MCInst #13135 BUFFER_LOAD_DWORDX4_OFFEN_gfx11 +// CHECK: buffer_load_b128 v[0:3], v0, s[0:3], 0 offen offset:4092 slc ; <MCInst #{{[0-9]+}} BUFFER_LOAD_DWORDX4_OFFEN_gfx11 // CHECK-NEXT: ; <MCOperand Reg:10104> // CHECK-NEXT: ; <MCOperand Reg:486> // CHECK-NEXT: ; <MCOperand Reg:7754> @@ -11,7 +11,7 @@ buffer_load_dwordx4 v[0:3], v0, s[0:3], 0, offen offset:4092 slc // CHECK-NEXT: ; <MCOperand Imm:2> // CHECK-NEXT: ; <MCOperand Imm:0>> buffer_store_dword v0, v1, s[0:3], 0 offen slc -// CHECK: buffer_store_b32 v0, v1, s[0:3], 0 offen slc ; <MCInst #14553 BUFFER_STORE_DWORD_OFFEN_gfx11 +// CHECK: buffer_store_b32 v0, v1, 
s[0:3], 0 offen slc ; <MCInst #{{[0-9]+}} BUFFER_STORE_DWORD_OFFEN_gfx11 // CHECK-NEXT: ; <MCOperand Reg:486> // CHECK-NEXT: ; <MCOperand Reg:487> // CHECK-NEXT: ; <MCOperand Reg:7754> @@ -22,7 +22,7 @@ buffer_store_dword v0, v1, s[0:3], 0 offen slc ; tbuffer ops use autogenerate asm parsers tbuffer_load_format_xyzw v[0:3], v0, s[0:3], 0 format:[BUF_FMT_32_32_SINT] offen offset:4092 slc -// CHECK: tbuffer_load_format_xyzw v[0:3], v0, s[0:3], 0 format:[BUF_FMT_32_32_SINT] offen offset:4092 slc ; <MCInst #34095 TBUFFER_LOAD_FORMAT_XYZW_OFFEN_gfx11 +// CHECK: tbuffer_load_format_xyzw v[0:3], v0, s[0:3], 0 format:[BUF_FMT_32_32_SINT] offen offset:4092 slc ; <MCInst #{{[0-9]+}} TBUFFER_LOAD_FORMAT_XYZW_OFFEN_gfx11 // CHECK-NEXT: ; <MCOperand Reg:10104> // CHECK-NEXT: ; <MCOperand Reg:486> // CHECK-NEXT: ; <MCOperand Reg:7754> @@ -32,7 +32,7 @@ tbuffer_load_format_xyzw v[0:3], v0, s[0:3], 0 format:[BUF_FMT_32_32_SINT] offen // CHECK-NEXT: ; <MCOperand Imm:2> // CHECK-NEXT: ; <MCOperand Imm:0>> tbuffer_store_d16_format_x v0, v1, s[0:3], 0 format:[BUF_FMT_10_10_10_2_SNORM] offen slc -// CHECK: tbuffer_store_d16_format_x v0, v1, s[0:3], 0 format:[BUF_FMT_10_10_10_2_SNORM] offen slc ; <MCInst #34264 TBUFFER_STORE_FORMAT_D16_X_OFFEN_gfx11 +// CHECK: tbuffer_store_d16_format_x v0, v1, s[0:3], 0 format:[BUF_FMT_10_10_10_2_SNORM] offen slc ; <MCInst #{{[0-9]+}} TBUFFER_STORE_FORMAT_D16_X_OFFEN_gfx11 // CHECK-NEXT: ; <MCOperand Reg:486> // CHECK-NEXT: ; <MCOperand Reg:487> // CHECK-NEXT: ; <MCOperand Reg:7754> diff --git a/llvm/test/MC/RISCV/xqcili-linker-relaxation.s b/llvm/test/MC/RISCV/xqcili-linker-relaxation.s new file mode 100644 index 0000000..ace6779 --- /dev/null +++ b/llvm/test/MC/RISCV/xqcili-linker-relaxation.s @@ -0,0 +1,37 @@ +# RUN: llvm-mc --triple=riscv32 -mattr=+relax,+experimental-xqcili \ +# RUN: %s -filetype=obj -o - -riscv-add-build-attributes \ +# RUN: | llvm-objdump -dr -M no-aliases - \ +# RUN: | FileCheck %s + +## This tests that we correctly emit relocations for linker relaxation when +## emitting `QC.E.LI` and `QC.LI`. 
+ + .section .text.ex1, "ax", @progbits +# CHECK-LABEL: <.text.ex1>: + blez a1, .L1 +# CHECK-NEXT: bge zero, a1, 0x0 <.text.ex1> +# CHECK-NEXT: R_RISCV_BRANCH .L1{{$}} + qc.e.li a0, sym +# CHECK-NEXT: qc.e.li a0, 0x0 +# CHECK-NEXT: R_RISCV_VENDOR QUALCOMM{{$}} +# CHECK-NEXT: R_RISCV_CUSTOM194 sym{{$}} +# CHECK-NEXT: R_RISCV_RELAX *ABS*{{$}} +.L1: +# CHECK: <.L1>: + ret +# CHECK-NEXT: c.jr ra + + .section .text.ex2, "ax", @progbits +# CHECK-LABEL: <.text.ex2>: + blez a1, .L2 +# CHECK-NEXT: bge zero, a1, 0x0 <.text.ex2> +# CHECK-NEXT: R_RISCV_BRANCH .L2{{$}} + qc.li a0, %qc.abs20(sym) +# CHECK-NEXT: qc.li a0, 0x0 +# CHECK-NEXT: R_RISCV_VENDOR QUALCOMM{{$}} +# CHECK-NEXT: R_RISCV_CUSTOM192 sym{{$}} +# CHECK-NEXT: R_RISCV_RELAX *ABS*{{$}} +.L2: +# CHECK: <.L2>: + ret +# CHECK-NEXT: c.jr ra diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll index 964a257..fafa82c 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll @@ -2800,6 +2800,88 @@ exit: ret i64 %r.0.lcssa } +define i32 @reduction_expression_ext_mulacc_livein(ptr %a, i16 %c) { +; CHECK-LABEL: define i32 @reduction_expression_ext_mulacc_livein( +; CHECK-SAME: ptr [[A:%.*]], i16 [[C:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[C]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i16> [[BROADCAST_SPLATINSERT]], <4 x i16> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP0]], align 1 +; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[WIDE_LOAD]] to <4 x i16> +; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i16> [[BROADCAST_SPLAT]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[TMP2]] to <4 x i32> +; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]]) +; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br label %[[FOR_EXIT:.*]] +; CHECK: [[FOR_EXIT]]: +; CHECK-NEXT: ret i32 [[TMP5]] +; +; CHECK-INTERLEAVED-LABEL: define i32 @reduction_expression_ext_mulacc_livein( +; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], i16 [[C:%.*]]) { +; CHECK-INTERLEAVED-NEXT: [[ENTRY:.*:]] +; CHECK-INTERLEAVED-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK-INTERLEAVED: [[VECTOR_PH]]: +; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[C]], i64 0 +; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i16> [[BROADCAST_SPLATINSERT]], <4 x i16> poison, <4 x i32> zeroinitializer +; CHECK-INTERLEAVED-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK-INTERLEAVED: [[VECTOR_BODY]]: +; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = 
phi i32 [ 0, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 4 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP0]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <4 x i8> [[WIDE_LOAD]] to <4 x i16> +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[WIDE_LOAD2]] to <4 x i16> +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul <4 x i16> [[BROADCAST_SPLAT]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul <4 x i16> [[BROADCAST_SPLAT]], [[TMP3]] +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <4 x i16> [[TMP4]] to <4 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP6]]) +; CHECK-INTERLEAVED-NEXT: [[TMP8]] = add i32 [[VEC_PHI]], [[TMP7]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <4 x i16> [[TMP5]] to <4 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP9]]) +; CHECK-INTERLEAVED-NEXT: [[TMP11]] = add i32 [[VEC_PHI1]], [[TMP10]] +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP11]], [[TMP8]] +; CHECK-INTERLEAVED-NEXT: br label %[[FOR_EXIT:.*]] +; CHECK-INTERLEAVED: [[FOR_EXIT]]: +; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]] +; +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %accum = phi i32 [ 0, %entry ], [ %add, %for.body ] + %gep.a = getelementptr i8, ptr %a, i64 %iv + %load.a = load i8, ptr %gep.a, align 1 + %ext.a = zext i8 %load.a to i16 + %mul = mul i16 %c, %ext.a + %mul.ext = zext i16 %mul to i32 + %add = add i32 %mul.ext, %accum + %iv.next = add i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, 1024 + br i1 %exitcond.not, label %for.exit, label %for.body + +for.exit: ; preds = %for.body + ret i32 %add +} + declare float @llvm.fmuladd.f32(float, float, float) !6 = distinct !{!6, !7, !8} diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll index 06b0448..291ada8 100644 --- a/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll @@ -800,3 +800,545 @@ exit: %r.0.lcssa = phi i64 [ %rdx.next, %loop ] ret i64 %r.0.lcssa } + +define i32 @print_mulacc_extended_const(ptr %start, ptr %end) { +; CHECK-LABEL: 'print_mulacc_extended_const' +; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { +; CHECK-NEXT: Live-in vp<%0> = VF +; CHECK-NEXT: Live-in vp<%1> = VF * UF +; CHECK-NEXT: Live-in vp<%2> = vector-trip-count +; CHECK-NEXT: vp<%3> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<%3> = EXPAND SCEV (1 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: vp<%4> = DERIVED-IV 
ir<%start> + vp<%2> * ir<1> +; CHECK-NEXT: EMIT vp<%5> = reduction-start-vector ir<0>, ir<0>, ir<1> +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<%6> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<%5>, vp<%9> +; CHECK-NEXT: vp<%7> = SCALAR-STEPS vp<%6>, ir<1>, vp<%0> +; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%7> +; CHECK-NEXT: vp<%8> = vector-pointer vp<%next.gep> +; CHECK-NEXT: WIDEN ir<%l> = load vp<%8> +; CHECK-NEXT: EXPRESSION vp<%9> = ir<%red> + reduce.add (mul (ir<%l> zext to i32), (ir<63> zext to i32)) +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%6>, vp<%1> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%2> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<%11> = compute-reduction-result ir<%red>, vp<%9> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<%3>, vp<%2> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: IR %red.next.lcssa = phi i32 [ %red.next, %loop ] (extra operand: vp<%11> from middle.block) +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%4>, middle.block ], [ ir<%start>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%11>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<loop> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<loop>: +; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) +; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1 +; CHECK-NEXT: IR %l.ext = zext i8 %l to i32 +; CHECK-NEXT: IR %mul = mul i32 %l.ext, 63 +; CHECK-NEXT: IR %red.next = add i32 %red, %mul +; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 +; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK: VPlan 'Final VPlan for VF={4},UF={1}' { +; CHECK-NEXT: Live-in ir<%1> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: IR %start2 = ptrtoint ptr %start to i64 +; CHECK-NEXT: IR %end1 = ptrtoint ptr %end to i64 +; CHECK-NEXT: IR %0 = add i64 %end1, 1 +; CHECK-NEXT: IR %1 = sub i64 %0, %start2 +; CHECK-NEXT: EMIT vp<%min.iters.check> = icmp ult ir<%1>, ir<4> +; CHECK-NEXT: EMIT branch-on-cond vp<%min.iters.check> +; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<%1>, ir<4> +; CHECK-NEXT: EMIT vp<%n.vec> = sub ir<%1>, vp<%n.mod.vf> +; CHECK-NEXT: vp<%3> = DERIVED-IV ir<%start> + vp<%n.vec> * ir<1> +; CHECK-NEXT: Successor(s): vector.body +; CHECK-EMPTY: +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ] +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi ir<0>, ir<%red.next> +; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%index> +; CHECK-NEXT: WIDEN ir<%l> = load vp<%next.gep> +; CHECK-NEXT: WIDEN-CAST ir<%l.ext> = zext ir<%l> to i32 +; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%l.ext>, ir<63> +; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + 
reduce.add (ir<%mul>) +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<4> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%n.vec> +; CHECK-NEXT: Successor(s): middle.block, vector.body +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<%5> = compute-reduction-result ir<%red>, ir<%red.next> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%1>, vp<%n.vec> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, ir-bb<scalar.ph> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: IR %red.next.lcssa = phi i32 [ %red.next, %loop ] (extra operand: vp<%5> from middle.block) +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<scalar.ph>: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%3>, middle.block ], [ ir<%start>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%5>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<loop> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<loop>: +; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %scalar.ph ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from ir-bb<scalar.ph>) +; CHECK-NEXT: IR %red = phi i32 [ 0, %scalar.ph ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from ir-bb<scalar.ph>) +; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1 +; CHECK-NEXT: IR %l.ext = zext i8 %l to i32 +; CHECK-NEXT: IR %mul = mul i32 %l.ext, 63 +; CHECK-NEXT: IR %red.next = add i32 %red, %mul +; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 +; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end +; CHECK-NEXT: No successors +; CHECK-NEXT: } +entry: + br label %loop + +loop: + %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] + %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] + %l = load i8, ptr %ptr.iv, align 1 + %l.ext = zext i8 %l to i32 + %mul = mul i32 %l.ext, 63 + %red.next = add i32 %red, %mul + %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 + %ec = icmp eq ptr %ptr.iv, %end + br i1 %ec, label %exit, label %loop + +exit: + ret i32 %red.next +} + +; Constants >= 128 cannot be treated as sign-extended, so the expression shouldn't extend 128 +define i32 @print_mulacc_not_extended_const(ptr %start, ptr %end) { +; CHECK-LABEL: 'print_mulacc_not_extended_const' +; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { +; CHECK-NEXT: Live-in vp<%0> = VF +; CHECK-NEXT: Live-in vp<%1> = VF * UF +; CHECK-NEXT: Live-in vp<%2> = vector-trip-count +; CHECK-NEXT: vp<%3> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<%3> = EXPAND SCEV (1 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%start> + vp<%2> * ir<1> +; CHECK-NEXT: EMIT vp<%5> = reduction-start-vector ir<0>, ir<0>, ir<1> +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<%6> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<%5>, vp<%9> +; CHECK-NEXT: vp<%7> = SCALAR-STEPS vp<%6>, ir<1>, vp<%0> +; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%7> +; CHECK-NEXT: vp<%8> = vector-pointer vp<%next.gep> +; CHECK-NEXT: WIDEN ir<%l> = load vp<%8> +; CHECK-NEXT: WIDEN-CAST ir<%l.ext> = sext ir<%l> to i32 +; CHECK-NEXT: EXPRESSION vp<%9> = ir<%red> + reduce.add (mul ir<%l.ext>, ir<128>) +; CHECK-NEXT: EMIT vp<%index.next> = 
add nuw vp<%6>, vp<%1> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%2> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<%11> = compute-reduction-result ir<%red>, vp<%9> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<%3>, vp<%2> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: IR %red.next.lcssa = phi i32 [ %red.next, %loop ] (extra operand: vp<%11> from middle.block) +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%4>, middle.block ], [ ir<%start>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%11>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<loop> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<loop>: +; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) +; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1 +; CHECK-NEXT: IR %l.ext = sext i8 %l to i32 +; CHECK-NEXT: IR %mul = mul i32 %l.ext, 128 +; CHECK-NEXT: IR %red.next = add i32 %red, %mul +; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 +; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK: VPlan 'Final VPlan for VF={4},UF={1}' { +; CHECK-NEXT: Live-in ir<%1> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: IR %start2 = ptrtoint ptr %start to i64 +; CHECK-NEXT: IR %end1 = ptrtoint ptr %end to i64 +; CHECK-NEXT: IR %0 = add i64 %end1, 1 +; CHECK-NEXT: IR %1 = sub i64 %0, %start2 +; CHECK-NEXT: EMIT vp<%min.iters.check> = icmp ult ir<%1>, ir<4> +; CHECK-NEXT: EMIT branch-on-cond vp<%min.iters.check> +; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<%1>, ir<4> +; CHECK-NEXT: EMIT vp<%n.vec> = sub ir<%1>, vp<%n.mod.vf> +; CHECK-NEXT: vp<%3> = DERIVED-IV ir<%start> + vp<%n.vec> * ir<1> +; CHECK-NEXT: Successor(s): vector.body +; CHECK-EMPTY: +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ] +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi ir<0>, ir<%red.next> +; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%index> +; CHECK-NEXT: WIDEN ir<%l> = load vp<%next.gep> +; CHECK-NEXT: WIDEN-CAST ir<%l.ext> = sext ir<%l> to i32 +; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%l.ext>, ir<128> +; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + reduce.add (ir<%mul>) +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<4> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%n.vec> +; CHECK-NEXT: Successor(s): middle.block, vector.body +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<%5> = compute-reduction-result ir<%red>, ir<%red.next> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%1>, vp<%n.vec> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, ir-bb<scalar.ph> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: IR %red.next.lcssa = phi i32 [ %red.next, %loop ] (extra operand: vp<%5> from middle.block) +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: 
ir-bb<scalar.ph>: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%3>, middle.block ], [ ir<%start>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%5>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<loop> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<loop>: +; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %scalar.ph ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from ir-bb<scalar.ph>) +; CHECK-NEXT: IR %red = phi i32 [ 0, %scalar.ph ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from ir-bb<scalar.ph>) +; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1 +; CHECK-NEXT: IR %l.ext = sext i8 %l to i32 +; CHECK-NEXT: IR %mul = mul i32 %l.ext, 128 +; CHECK-NEXT: IR %red.next = add i32 %red, %mul +; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 +; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end +; CHECK-NEXT: No successors +; CHECK-NEXT: } +entry: + br label %loop + +loop: + %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] + %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] + %l = load i8, ptr %ptr.iv, align 1 + %l.ext = sext i8 %l to i32 + %mul = mul i32 %l.ext, 128 + %red.next = add i32 %red, %mul + %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 + %ec = icmp eq ptr %ptr.iv, %end + br i1 %ec, label %exit, label %loop + +exit: + %red.next.lcssa = phi i32 [ %red.next, %loop ] + ret i32 %red.next.lcssa +} + +define i64 @print_ext_mulacc_extended_const(ptr %start, ptr %end) { +; CHECK-LABEL: 'print_ext_mulacc_extended_const' +; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { +; CHECK-NEXT: Live-in vp<%0> = VF +; CHECK-NEXT: Live-in vp<%1> = VF * UF +; CHECK-NEXT: Live-in vp<%2> = vector-trip-count +; CHECK-NEXT: vp<%3> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<%3> = EXPAND SCEV (1 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%start> + vp<%2> * ir<1> +; CHECK-NEXT: EMIT vp<%5> = reduction-start-vector ir<0>, ir<0>, ir<1> +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<%6> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<%5>, vp<%9> +; CHECK-NEXT: vp<%7> = SCALAR-STEPS vp<%6>, ir<1>, vp<%0> +; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%7> +; CHECK-NEXT: vp<%8> = vector-pointer vp<%next.gep> +; CHECK-NEXT: WIDEN ir<%l> = load vp<%8> +; CHECK-NEXT: EXPRESSION vp<%9> = ir<%red> + reduce.add (mul (ir<%l> zext to i64), (ir<63> zext to i64)) +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%6>, vp<%1> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%2> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<%11> = compute-reduction-result ir<%red>, vp<%9> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<%3>, vp<%2> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: IR %red.next.lcssa = phi i64 [ %red.next, %loop ] (extra operand: vp<%11> from middle.block) +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%4>, middle.block ], [ ir<%start>, ir-bb<entry> ] +; 
CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%11>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<loop> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<loop>: +; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %red = phi i64 [ 0, %entry ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) +; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1 +; CHECK-NEXT: IR %l.ext = zext i8 %l to i32 +; CHECK-NEXT: IR %mul = mul i32 %l.ext, 63 +; CHECK-NEXT: IR %mul.ext = zext i32 %mul to i64 +; CHECK-NEXT: IR %red.next = add i64 %red, %mul.ext +; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 +; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK: VPlan 'Final VPlan for VF={4},UF={1}' { +; CHECK-NEXT: Live-in ir<%1> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: IR %start2 = ptrtoint ptr %start to i64 +; CHECK-NEXT: IR %end1 = ptrtoint ptr %end to i64 +; CHECK-NEXT: IR %0 = add i64 %end1, 1 +; CHECK-NEXT: IR %1 = sub i64 %0, %start2 +; CHECK-NEXT: EMIT vp<%min.iters.check> = icmp ult ir<%1>, ir<4> +; CHECK-NEXT: EMIT branch-on-cond vp<%min.iters.check> +; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<%1>, ir<4> +; CHECK-NEXT: EMIT vp<%n.vec> = sub ir<%1>, vp<%n.mod.vf> +; CHECK-NEXT: vp<%3> = DERIVED-IV ir<%start> + vp<%n.vec> * ir<1> +; CHECK-NEXT: Successor(s): vector.body +; CHECK-EMPTY: +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ] +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi ir<0>, ir<%red.next> +; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%index> +; CHECK-NEXT: WIDEN ir<%l> = load vp<%next.gep> +; CHECK-NEXT: WIDEN-CAST vp<%4> = zext ir<%l> to i64 +; CHECK-NEXT: WIDEN ir<%mul> = mul vp<%4>, ir<63> +; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + reduce.add (ir<%mul>) +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<4> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%n.vec> +; CHECK-NEXT: Successor(s): middle.block, vector.body +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<%6> = compute-reduction-result ir<%red>, ir<%red.next> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%1>, vp<%n.vec> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, ir-bb<scalar.ph> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: IR %red.next.lcssa = phi i64 [ %red.next, %loop ] (extra operand: vp<%6> from middle.block) +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<scalar.ph>: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%3>, middle.block ], [ ir<%start>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%6>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<loop> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<loop>: +; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %scalar.ph ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from ir-bb<scalar.ph>) +; CHECK-NEXT: IR %red = phi i64 [ 0, %scalar.ph ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from ir-bb<scalar.ph>) +; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1 +; CHECK-NEXT: IR %l.ext = zext i8 %l to i32 +; CHECK-NEXT: IR %mul = mul i32 %l.ext, 63 +; CHECK-NEXT: IR 
%mul.ext = zext i32 %mul to i64 +; CHECK-NEXT: IR %red.next = add i64 %red, %mul.ext +; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 +; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end +; CHECK-NEXT: No successors +; CHECK-NEXT: } +entry: + br label %loop + +loop: + %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] + %red = phi i64 [ 0, %entry ], [ %red.next, %loop ] + %l = load i8, ptr %ptr.iv, align 1 + %l.ext = zext i8 %l to i32 + %mul = mul i32 %l.ext, 63 + %mul.ext = zext i32 %mul to i64 + %red.next = add i64 %red, %mul.ext + %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 + %ec = icmp eq ptr %ptr.iv, %end + br i1 %ec, label %exit, label %loop + +exit: + ret i64 %red.next +} + +; Constants >= 128 cannot be treated as sign-extended, so the expression shouldn't extend 128 +define i64 @print_ext_mulacc_not_extended_const(ptr %start, ptr %end) { +; CHECK-LABEL: 'print_ext_mulacc_not_extended_const' +; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { +; CHECK-NEXT: Live-in vp<%0> = VF +; CHECK-NEXT: Live-in vp<%1> = VF * UF +; CHECK-NEXT: Live-in vp<%2> = vector-trip-count +; CHECK-NEXT: vp<%3> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<%3> = EXPAND SCEV (1 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%start> + vp<%2> * ir<1> +; CHECK-NEXT: EMIT vp<%5> = reduction-start-vector ir<0>, ir<0>, ir<1> +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<%6> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<%5>, vp<%9> +; CHECK-NEXT: vp<%7> = SCALAR-STEPS vp<%6>, ir<1>, vp<%0> +; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%7> +; CHECK-NEXT: vp<%8> = vector-pointer vp<%next.gep> +; CHECK-NEXT: WIDEN ir<%l> = load vp<%8> +; CHECK-NEXT: WIDEN-CAST ir<%l.ext> = sext ir<%l> to i32 +; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%l.ext>, ir<128> +; CHECK-NEXT: EXPRESSION vp<%9> = ir<%red> + reduce.add (ir<%mul> sext to i64) +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%6>, vp<%1> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%2> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<%11> = compute-reduction-result ir<%red>, vp<%9> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<%3>, vp<%2> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: IR %red.next.lcssa = phi i64 [ %red.next, %loop ] (extra operand: vp<%11> from middle.block) +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%4>, middle.block ], [ ir<%start>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%11>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<loop> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<loop>: +; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %red = phi i64 [ 0, %entry ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) +; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1 +; CHECK-NEXT: IR %l.ext = sext i8 
%l to i32 +; CHECK-NEXT: IR %mul = mul i32 %l.ext, 128 +; CHECK-NEXT: IR %mul.ext = sext i32 %mul to i64 +; CHECK-NEXT: IR %red.next = add i64 %red, %mul.ext +; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 +; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK: VPlan 'Final VPlan for VF={4},UF={1}' { +; CHECK-NEXT: Live-in ir<%1> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: IR %start2 = ptrtoint ptr %start to i64 +; CHECK-NEXT: IR %end1 = ptrtoint ptr %end to i64 +; CHECK-NEXT: IR %0 = add i64 %end1, 1 +; CHECK-NEXT: IR %1 = sub i64 %0, %start2 +; CHECK-NEXT: EMIT vp<%min.iters.check> = icmp ult ir<%1>, ir<4> +; CHECK-NEXT: EMIT branch-on-cond vp<%min.iters.check> +; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<%1>, ir<4> +; CHECK-NEXT: EMIT vp<%n.vec> = sub ir<%1>, vp<%n.mod.vf> +; CHECK-NEXT: vp<%3> = DERIVED-IV ir<%start> + vp<%n.vec> * ir<1> +; CHECK-NEXT: Successor(s): vector.body +; CHECK-EMPTY: +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ] +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi ir<0>, ir<%red.next> +; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<%index> +; CHECK-NEXT: WIDEN ir<%l> = load vp<%next.gep> +; CHECK-NEXT: WIDEN-CAST ir<%l.ext> = sext ir<%l> to i32 +; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%l.ext>, ir<128> +; CHECK-NEXT: WIDEN-CAST ir<%mul.ext> = sext ir<%mul> to i64 +; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + reduce.add (ir<%mul.ext>) +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<4> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%n.vec> +; CHECK-NEXT: Successor(s): middle.block, vector.body +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<%5> = compute-reduction-result ir<%red>, ir<%red.next> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%1>, vp<%n.vec> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, ir-bb<scalar.ph> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: IR %red.next.lcssa = phi i64 [ %red.next, %loop ] (extra operand: vp<%5> from middle.block) +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<scalar.ph>: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%3>, middle.block ], [ ir<%start>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%5>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<loop> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<loop>: +; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %scalar.ph ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from ir-bb<scalar.ph>) +; CHECK-NEXT: IR %red = phi i64 [ 0, %scalar.ph ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from ir-bb<scalar.ph>) +; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1 +; CHECK-NEXT: IR %l.ext = sext i8 %l to i32 +; CHECK-NEXT: IR %mul = mul i32 %l.ext, 128 +; CHECK-NEXT: IR %mul.ext = sext i32 %mul to i64 +; CHECK-NEXT: IR %red.next = add i64 %red, %mul.ext +; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 +; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end +; CHECK-NEXT: No successors +; CHECK-NEXT: } +entry: + br label %loop + +loop: + %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] + %red = phi i64 [ 0, %entry ], [ %red.next, %loop ] + %l = load i8, ptr %ptr.iv, align 1 + 
%l.ext = sext i8 %l to i32 + %mul = mul i32 %l.ext, 128 + %mul.ext = sext i32 %mul to i64 + %red.next = add i64 %red, %mul.ext + %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 + %ec = icmp eq ptr %ptr.iv, %end + br i1 %ec, label %exit, label %loop + +exit: + %red.next.lcssa = phi i64 [ %red.next, %loop ] + ret i64 %red.next.lcssa +} diff --git a/llvm/test/Transforms/SLPVectorizer/X86/parent-node-schedulable-with-multi-copyables.ll b/llvm/test/Transforms/SLPVectorizer/X86/parent-node-schedulable-with-multi-copyables.ll new file mode 100644 index 0000000..9e96e93 --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/X86/parent-node-schedulable-with-multi-copyables.ll @@ -0,0 +1,170 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -passes=slp-vectorizer -S -slp-threshold=-100 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s + +define i64 @test(ptr %arg1, i64 %alloca.promoted344, i8 %load.311.i, i1 %load1.i) { +; CHECK-LABEL: define i64 @test( +; CHECK-SAME: ptr [[ARG1:%.*]], i64 [[ALLOCA_PROMOTED344:%.*]], i8 [[LOAD_311_I:%.*]], i1 [[LOAD1_I:%.*]]) { +; CHECK-NEXT: [[BB:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i8> <i8 0, i8 0, i8 0, i8 poison>, i8 [[LOAD_311_I]], i32 3 +; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i8> <i8 poison, i8 poison, i8 0, i8 0>, i8 [[LOAD_311_I]], i32 0 +; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i64> poison, i64 [[ALLOCA_PROMOTED344]], i32 0 +; CHECK-NEXT: br label %[[BB2:.*]] +; CHECK: [[BB2]]: +; CHECK-NEXT: [[TMP3:%.*]] = phi <2 x i64> [ zeroinitializer, %[[BB]] ], [ [[TMP28:%.*]], %[[BB12_8_I:.*]] ] +; CHECK-NEXT: [[TMP4:%.*]] = phi <8 x i8> [ zeroinitializer, %[[BB]] ], [ [[TMP29:%.*]], %[[BB12_8_I]] ] +; CHECK-NEXT: br i1 [[LOAD1_I]], label %[[SPAM_EXIT:.*]], label %[[BB4_LR_PH_I:.*]] +; CHECK: [[BB4_LR_PH_I]]: +; CHECK-NEXT: br i1 true, label %[[BB3_I_I_PEEL:.*]], label %[[EGGS_EXIT_I_PEEL:.*]] +; CHECK: [[BB3_I_I_PEEL]]: +; CHECK-NEXT: [[TMP5:%.*]] = and <2 x i64> [[TMP3]], splat (i64 1) +; CHECK-NEXT: [[LOAD4_I_I_PEEL:%.*]] = load i64, ptr [[ARG1]], align 8 +; CHECK-NEXT: [[SHL_I_I_PEEL:%.*]] = shl i64 [[LOAD4_I_I_PEEL]], 1 +; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> poison, <2 x i32> <i32 poison, i32 0> +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x i64> [[TMP6]], i64 [[SHL_I_I_PEEL]], i32 0 +; CHECK-NEXT: [[TMP8:%.*]] = or <2 x i64> [[TMP5]], [[TMP7]] +; CHECK-NEXT: [[TMP9:%.*]] = xor <2 x i64> [[TMP5]], [[TMP7]] +; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP8]], <2 x i64> [[TMP9]], <2 x i32> <i32 0, i32 3> +; CHECK-NEXT: br label %[[EGGS_EXIT_I_PEEL]] +; CHECK: [[EGGS_EXIT_I_PEEL]]: +; CHECK-NEXT: [[TMP11:%.*]] = phi <2 x i64> [ [[TMP10]], %[[BB3_I_I_PEEL]] ], [ zeroinitializer, %[[BB4_LR_PH_I]] ] +; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <2 x i64> [[TMP11]], <2 x i64> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 0> +; CHECK-NEXT: [[TMP13:%.*]] = trunc <4 x i64> [[TMP12]] to <4 x i8> +; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i64> [[TMP12]], i32 1 +; CHECK-NEXT: br label %[[SPAM_EXIT]] +; CHECK: [[SPAM_EXIT]]: +; CHECK-NEXT: [[GETELEMENTPTR_I_I_PROMOTED346:%.*]] = phi i64 [ [[TMP14]], %[[EGGS_EXIT_I_PEEL]] ], [ 0, %[[BB2]] ] +; CHECK-NEXT: [[LOAD_8_I:%.*]] = phi i8 [ 0, %[[EGGS_EXIT_I_PEEL]] ], [ 1, %[[BB2]] ] +; CHECK-NEXT: [[TMP15:%.*]] = phi <4 x i8> [ [[TMP13]], %[[EGGS_EXIT_I_PEEL]] ], [ zeroinitializer, %[[BB2]] ] +; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <4 x i8> [[TMP15]], <4 x i8> poison, <4 x 
i32> <i32 2, i32 2, i32 2, i32 2> +; CHECK-NEXT: br i1 [[LOAD1_I]], label %[[BB12_8_I]], label %[[BB12_1_THREAD_I:.*]] +; CHECK: [[BB12_1_THREAD_I]]: +; CHECK-NEXT: [[TMP17:%.*]] = extractelement <8 x i8> [[TMP4]], i32 0 +; CHECK-NEXT: [[ICMP5_3_I:%.*]] = icmp eq i8 [[TMP17]], 0 +; CHECK-NEXT: br i1 [[ICMP5_3_I]], label %[[BB12_3_I:.*]], label %[[BB8_3_I:.*]] +; CHECK: [[BB8_3_I]]: +; CHECK-NEXT: br label %[[BB12_3_I]] +; CHECK: [[BB12_3_I]]: +; CHECK-NEXT: [[TMP18:%.*]] = extractelement <8 x i8> [[TMP4]], i32 1 +; CHECK-NEXT: [[ICMP5_4_I:%.*]] = icmp eq i8 [[TMP18]], 0 +; CHECK-NEXT: br i1 [[ICMP5_4_I]], label %[[BB12_4_I:.*]], label %[[BB8_4_I:.*]] +; CHECK: [[BB8_4_I]]: +; CHECK-NEXT: br label %[[BB12_4_I]] +; CHECK: [[BB12_4_I]]: +; CHECK-NEXT: [[TMP19:%.*]] = extractelement <8 x i8> [[TMP4]], i32 2 +; CHECK-NEXT: [[ICMP5_5_I:%.*]] = icmp eq i8 [[TMP19]], 0 +; CHECK-NEXT: br i1 [[ICMP5_5_I]], label %[[BB12_5_I:.*]], label %[[BB8_5_I:.*]] +; CHECK: [[BB8_5_I]]: +; CHECK-NEXT: br label %[[BB12_5_I]] +; CHECK: [[BB12_5_I]]: +; CHECK-NEXT: [[TMP20:%.*]] = extractelement <8 x i8> [[TMP4]], i32 3 +; CHECK-NEXT: [[ICMP5_7_I:%.*]] = icmp eq i8 [[TMP20]], 0 +; CHECK-NEXT: br i1 [[ICMP5_7_I]], label %[[BB12_7_I:.*]], label %[[BB8_7_I:.*]] +; CHECK: [[BB8_7_I]]: +; CHECK-NEXT: br label %[[BB12_7_I]] +; CHECK: [[BB12_7_I]]: +; CHECK-NEXT: [[TMP21:%.*]] = extractelement <8 x i8> [[TMP4]], i32 4 +; CHECK-NEXT: [[ICMP5_8_I:%.*]] = icmp eq i8 [[TMP21]], 0 +; CHECK-NEXT: br i1 [[ICMP5_8_I]], label %[[BB12_8_I]], label %[[BB8_8_I:.*]] +; CHECK: [[BB8_8_I]]: +; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i8> [[TMP1]], i8 [[LOAD_8_I]], i32 1 +; CHECK-NEXT: [[TMP23:%.*]] = insertelement <4 x i8> poison, i8 [[LOAD_8_I]], i32 0 +; CHECK-NEXT: [[TMP24:%.*]] = shufflevector <8 x i8> [[TMP4]], <8 x i8> poison, <4 x i32> <i32 poison, i32 5, i32 6, i32 7> +; CHECK-NEXT: [[TMP25:%.*]] = shufflevector <4 x i8> [[TMP23]], <4 x i8> [[TMP24]], <4 x i32> <i32 0, i32 5, i32 6, i32 7> +; CHECK-NEXT: br label %[[BB12_8_I]] +; CHECK: [[BB12_8_I]]: +; CHECK-NEXT: [[TMP26:%.*]] = phi <4 x i8> [ [[TMP0]], %[[BB12_7_I]] ], [ [[TMP22]], %[[BB8_8_I]] ], [ [[TMP15]], %[[SPAM_EXIT]] ] +; CHECK-NEXT: [[TMP27:%.*]] = phi <4 x i8> [ zeroinitializer, %[[BB12_7_I]] ], [ [[TMP25]], %[[BB8_8_I]] ], [ [[TMP16]], %[[SPAM_EXIT]] ] +; CHECK-NEXT: [[TMP28]] = insertelement <2 x i64> [[TMP2]], i64 [[GETELEMENTPTR_I_I_PROMOTED346]], i32 1 +; CHECK-NEXT: [[TMP29]] = shufflevector <4 x i8> [[TMP26]], <4 x i8> [[TMP27]], <8 x i32> <i32 2, i32 7, i32 5, i32 0, i32 1, i32 3, i32 4, i32 6> +; CHECK-NEXT: br label %[[BB2]] +; +bb: + br label %bb2 + +bb2: + %getelementptr.i.i.promoted = phi i64 [ 0, %bb ], [ %getelementptr.i.i.promoted346, %bb12.8.i ] + %alloca.promoted = phi i64 [ 0, %bb ], [ %alloca.promoted344, %bb12.8.i ] + %load.8.i231 = phi i8 [ 0, %bb ], [ %load.8.i239, %bb12.8.i ] + %load.7.i217 = phi i8 [ 0, %bb ], [ %load.7.i225, %bb12.8.i ] + %load.626.i200 = phi i8 [ 0, %bb ], [ %load.626.i208, %bb12.8.i ] + %load.6.i183 = phi i8 [ 0, %bb ], [ %load.6.i191, %bb12.8.i ] + %load.5.i167 = phi i8 [ 0, %bb ], [ %load.5.i175, %bb12.8.i ] + %load.418.i148 = phi i8 [ 0, %bb ], [ %load.418.i156, %bb12.8.i ] + %load.4.i129 = phi i8 [ 0, %bb ], [ %load.4.i137, %bb12.8.i ] + %load.3.i111 = phi i8 [ 0, %bb ], [ %load.3.i119, %bb12.8.i ] + br i1 %load1.i, label %spam.exit, label %bb4.lr.ph.i + +bb4.lr.ph.i: + br i1 true, label %bb3.i.i.peel, label %eggs.exit.i.peel + +bb3.i.i.peel: + %and.i.i.peel = and i64 %alloca.promoted, 1 + %load4.i.i.peel = load 
i64, ptr %arg1, align 8 + %shl.i.i.peel = shl i64 %load4.i.i.peel, 1 + %or.i.i.peel = or i64 %shl.i.i.peel, %and.i.i.peel + %and6.i.i.peel = and i64 %getelementptr.i.i.promoted, 1 + %xor.i.i.peel = xor i64 %and6.i.i.peel, %alloca.promoted + br label %eggs.exit.i.peel + +eggs.exit.i.peel: + %load5.i.i93.peel = phi i64 [ %xor.i.i.peel, %bb3.i.i.peel ], [ 0, %bb4.lr.ph.i ] + %or.i.i91.peel = phi i64 [ %or.i.i.peel, %bb3.i.i.peel ], [ 0, %bb4.lr.ph.i ] + %0 = trunc i64 %or.i.i91.peel to i8 + %1 = trunc nuw i64 %or.i.i91.peel to i8 + %2 = trunc i64 %load5.i.i93.peel to i8 + br label %spam.exit + +spam.exit: + %getelementptr.i.i.promoted346 = phi i64 [ %load5.i.i93.peel, %eggs.exit.i.peel ], [ 0, %bb2 ] + %load.834.i = phi i8 [ %2, %eggs.exit.i.peel ], [ 0, %bb2 ] + %load.7.i25 = phi i8 [ %1, %eggs.exit.i.peel ], [ 0, %bb2 ] + %load.8.i = phi i8 [ 0, %eggs.exit.i.peel ], [ 1, %bb2 ] + %load.6.i18 = phi i8 [ %0, %eggs.exit.i.peel ], [ 0, %bb2 ] + br i1 %load1.i, label %bb12.8.i, label %bb12.1.thread.i + +bb12.1.thread.i: + %icmp5.3.i = icmp eq i8 %load.3.i111, 0 + br i1 %icmp5.3.i, label %bb12.3.i, label %bb8.3.i + +bb8.3.i: + br label %bb12.3.i + +bb12.3.i: + %icmp5.4.i = icmp eq i8 %load.4.i129, 0 + br i1 %icmp5.4.i, label %bb12.4.i, label %bb8.4.i + +bb8.4.i: + br label %bb12.4.i + +bb12.4.i: + %icmp5.5.i = icmp eq i8 %load.5.i167, 0 + br i1 %icmp5.5.i, label %bb12.5.i, label %bb8.5.i + +bb8.5.i: + br label %bb12.5.i + +bb12.5.i: + %icmp5.7.i = icmp eq i8 %load.7.i217, 0 + br i1 %icmp5.7.i, label %bb12.7.i, label %bb8.7.i + +bb8.7.i: + br label %bb12.7.i + +bb12.7.i: + %icmp5.8.i = icmp eq i8 %load.8.i231, 0 + br i1 %icmp5.8.i, label %bb12.8.i, label %bb8.8.i + +bb8.8.i: + br label %bb12.8.i + +bb12.8.i: + %load.8.i239 = phi i8 [ 0, %bb12.7.i ], [ %load.8.i, %bb8.8.i ], [ %load.834.i, %spam.exit ] + %load.7.i225 = phi i8 [ 0, %bb12.7.i ], [ %load.311.i, %bb8.8.i ], [ %load.7.i25, %spam.exit ] + %load.626.i208 = phi i8 [ 0, %bb12.7.i ], [ %load.8.i, %bb8.8.i ], [ %load.6.i18, %spam.exit ] + %load.6.i191 = phi i8 [ %load.311.i, %bb12.7.i ], [ 0, %bb8.8.i ], [ %load.6.i18, %spam.exit ] + %load.5.i175 = phi i8 [ 0, %bb12.7.i ], [ %load.6.i183, %bb8.8.i ], [ %load.6.i18, %spam.exit ] + %load.418.i156 = phi i8 [ 0, %bb12.7.i ], [ %load.626.i200, %bb8.8.i ], [ %load.6.i18, %spam.exit ] + %load.4.i137 = phi i8 [ 0, %bb12.7.i ], [ %load.418.i148, %bb8.8.i ], [ %load.6.i18, %spam.exit ] + %load.3.i119 = phi i8 [ 0, %bb12.7.i ], [ 0, %bb8.8.i ], [ %load.6.i18, %spam.exit ] + br label %bb2 +} diff --git a/llvm/test/Transforms/SimplifyCFG/pr165088.ll b/llvm/test/Transforms/SimplifyCFG/pr165088.ll new file mode 100644 index 0000000..4514a19 --- /dev/null +++ b/llvm/test/Transforms/SimplifyCFG/pr165088.ll @@ -0,0 +1,186 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -S -passes="simplifycfg<switch-range-to-icmp>" < %s | FileCheck %s + +; Avoid getting stuck in the cycle pr165088_cycle_[1-4]. 
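+; Each reduced case below drove SimplifyCFG into repeatedly re-cloning the
+; same %x comparisons while folding branches (the `.old`-suffixed condition
+; names in the expected output appear to be left by that cloning), so the
+; pass previously never reached a fixed point.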
+ +define void @pr165088_cycle_1(i8 %x) { +; CHECK-LABEL: define void @pr165088_cycle_1( +; CHECK-SAME: i8 [[X:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i8 [[X]], 2 +; CHECK-NEXT: br i1 [[TMP0]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]] +; CHECK: [[BLOCK1:.*]]: +; CHECK-NEXT: [[COND2:%.*]] = icmp ugt i8 [[X]], 1 +; CHECK-NEXT: br i1 [[COND2]], label %[[BLOCK3]], label %[[BLOCK2]] +; CHECK: [[BLOCK2]]: +; CHECK-NEXT: br label %[[BLOCK3]] +; CHECK: [[BLOCK3]]: +; CHECK-NEXT: [[COND3:%.*]] = icmp eq i8 [[X]], 0 +; CHECK-NEXT: br i1 [[COND3]], label %[[EXIT:.*]], label %[[BLOCK1]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + %switch = icmp uge i8 %x, 2 + %cond1 = icmp ugt i8 %x, 1 + %or.cond = and i1 %switch, %cond1 + br i1 %or.cond, label %block3, label %block2 + +block1: + %cond2 = icmp ugt i8 %x, 1 + br i1 %cond2, label %block3, label %block2 + +block2: + br label %block3 + +block3: + %cond3 = icmp eq i8 %x, 0 + br i1 %cond3, label %exit, label %block1 + +exit: + ret void +} + +define void @pr165088_cycle_2(i8 %x) { +; CHECK-LABEL: define void @pr165088_cycle_2( +; CHECK-SAME: i8 [[X:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[X]], 2 +; CHECK-NEXT: br i1 [[SWITCH]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]] +; CHECK: [[BLOCK1:.*]]: +; CHECK-NEXT: [[COND2:%.*]] = icmp ugt i8 [[X]], 1 +; CHECK-NEXT: br i1 [[COND2]], label %[[BLOCK3]], label %[[BLOCK2]] +; CHECK: [[BLOCK2]]: +; CHECK-NEXT: br label %[[BLOCK3]] +; CHECK: [[BLOCK3]]: +; CHECK-NEXT: [[COND3:%.*]] = icmp eq i8 [[X]], 0 +; CHECK-NEXT: br i1 [[COND3]], label %[[EXIT:.*]], label %[[BLOCK1]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + switch i8 %x, label %block3 [ + i8 1, label %block2 + i8 0, label %block2 + ] + +block1: ; preds = %block3 + %cond2 = icmp ugt i8 %x, 1 + br i1 %cond2, label %block3, label %block2 + +block2: ; preds = %entry, %entry, %block1 + br label %block3 + +block3: ; preds = %entry, %block2, %block1 + %cond3 = icmp eq i8 %x, 0 + br i1 %cond3, label %exit, label %block1 + +exit: ; preds = %block3 + ret void +} + +define void @pr165088_cycle_3(i8 %x) { +; CHECK-LABEL: define void @pr165088_cycle_3( +; CHECK-SAME: i8 [[X:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[BLOCK3:.*]] +; CHECK: [[BLOCK3]]: +; CHECK-NEXT: [[COND3:%.*]] = icmp eq i8 [[X]], 0 +; CHECK-NEXT: br i1 [[COND3]], label %[[EXIT:.*]], label %[[BLOCK3]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + switch i8 %x, label %block1 [ + i8 1, label %block2 + i8 0, label %block2 + ] + +block1: ; preds = %entry, %block3 + %cond2 = icmp ugt i8 %x, 1 + br i1 %cond2, label %block3, label %block2 + +block2: ; preds = %entry, %entry, %block1 + br label %block3 + +block3: ; preds = %block2, %block1 + %cond3 = icmp eq i8 %x, 0 + br i1 %cond3, label %exit, label %block1 + +exit: ; preds = %block3 + ret void +} + +define void @pr165088_cycle_4(i8 %x) { +; CHECK-LABEL: define void @pr165088_cycle_4( +; CHECK-SAME: i8 [[X:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i8 [[X]], 2 +; CHECK-NEXT: br i1 [[TMP0]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]] +; CHECK: [[BLOCK1:.*]]: +; CHECK-NEXT: [[COND2_OLD:%.*]] = icmp ugt i8 [[X]], 1 +; CHECK-NEXT: br i1 [[COND2_OLD]], label %[[BLOCK3]], label %[[BLOCK2]] +; CHECK: [[BLOCK2]]: +; CHECK-NEXT: br label %[[BLOCK3]] +; CHECK: [[BLOCK3]]: +; CHECK-NEXT: [[COND3:%.*]] = icmp eq i8 [[X]], 0 +; CHECK-NEXT: br i1 [[COND3]], label %[[EXIT:.*]], label %[[BLOCK1]] +; CHECK: 
[[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + %switch = icmp ult i8 %x, 2 + br i1 %switch, label %block2, label %block1 + +block1: ; preds = %entry, %block3 + %cond2 = icmp ugt i8 %x, 1 + br i1 %cond2, label %block3, label %block2 + +block2: ; preds = %entry, %block1 + br label %block3 + +block3: ; preds = %block2, %block1 + %cond3 = icmp eq i8 %x, 0 + br i1 %cond3, label %exit, label %block1 + +exit: ; preds = %block3 + ret void +} + +define void @pr165088_original(i8 %x) { +; CHECK-LABEL: define void @pr165088_original( +; CHECK-SAME: i8 [[X:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i8 [[X]], 2 +; CHECK-NEXT: br i1 [[TMP0]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]] +; CHECK: [[BLOCK1:.*]]: +; CHECK-NEXT: [[COND3_OLD_OLD:%.*]] = icmp ugt i8 [[X]], 1 +; CHECK-NEXT: br i1 [[COND3_OLD_OLD]], label %[[BLOCK3]], label %[[BLOCK2]] +; CHECK: [[BLOCK2]]: +; CHECK-NEXT: br label %[[BLOCK3]] +; CHECK: [[BLOCK3]]: +; CHECK-NEXT: [[COND4:%.*]] = icmp eq i8 [[X]], 0 +; CHECK-NEXT: br i1 [[COND4]], label %[[EXIT:.*]], label %[[BLOCK1]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + %cond = icmp ne i8 %x, 0 + %cond3 = icmp ne i8 %x, 0 + %or.cond = and i1 %cond, %cond3 + br i1 %or.cond, label %block3, label %block2 + +block1: ; preds = %block3 + %cond3.old = icmp ugt i8 %x, 1 + br i1 %cond3.old, label %block3, label %block2 + +block2: ; preds = %block1, %entry + br label %block3 + +block3: ; preds = %block2, %block1, %entry + %cond4 = icmp eq i8 %x, 0 + br i1 %cond4, label %exit, label %block1 + +exit: ; preds = %block3 + ret void +} diff --git a/llvm/unittests/ProfileData/InstrProfTest.cpp b/llvm/unittests/ProfileData/InstrProfTest.cpp index dd17844..8641b93 100644 --- a/llvm/unittests/ProfileData/InstrProfTest.cpp +++ b/llvm/unittests/ProfileData/InstrProfTest.cpp @@ -914,7 +914,7 @@ TEST_P(MaybeSparseInstrProfTest, annotate_vp_data) { ASSERT_THAT(ValueData, SizeIs(0)); // Remove the MD_prof metadata - Inst->setMetadata(LLVMContext::MD_prof, 0); + Inst->setMetadata(LLVMContext::MD_prof, nullptr); // Annotate 5 records this time. annotateValueSite(*M, *Inst, R.get(), IPVK_IndirectCallTarget, 0, 5); ValueData = getValueProfDataFromInst(*Inst, IPVK_IndirectCallTarget, 5, T); @@ -932,7 +932,7 @@ TEST_P(MaybeSparseInstrProfTest, annotate_vp_data) { ASSERT_EQ(2U, ValueData[4].Count); // Remove the MD_prof metadata - Inst->setMetadata(LLVMContext::MD_prof, 0); + Inst->setMetadata(LLVMContext::MD_prof, nullptr); // Annotate with 4 records. 
 InstrProfValueData VD0Sorted[] = {{1000, 6}, {2000, 5}, {3000, 4}, {4000, 3}, {5000, 2}, {6000, 1}};
diff --git a/llvm/utils/git/code-format-helper.py b/llvm/utils/git/code-format-helper.py
index 406a728..dff7f78 100755
--- a/llvm/utils/git/code-format-helper.py
+++ b/llvm/utils/git/code-format-helper.py
@@ -508,7 +508,7 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument(
-        "--token", type=str, required=True, help="GitHub authentiation token"
+        "--token", type=str, required=True, help="GitHub authentication token"
     )
     parser.add_argument(
         "--repo",
diff --git a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
index d2df244..5241f9a 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
@@ -147,6 +147,35 @@ class ROCDL_DimGetterFunctionOp<string mnemonic, string device_function,
 }
 
 //===----------------------------------------------------------------------===//
+// ROCDL vector type definitions
+//===----------------------------------------------------------------------===//
+
+class ROCDL_ConcreteVector<Type elem, int length> :
+  FixedVectorOfLengthAndType<[length], [elem]>,
+  BuildableType<
+    "::mlir::VectorType::get({" # length # "} ,"
+    # elem.builderCall # ")">;
+
+def ROCDL_V2I16Type : ROCDL_ConcreteVector<I16, 2>;
+def ROCDL_V2F16Type : ROCDL_ConcreteVector<F16, 2>;
+def ROCDL_V2I32Type : ROCDL_ConcreteVector<I32, 2>;
+def ROCDL_V2BF16Type : ROCDL_ConcreteVector<BF16, 2>;
+def ROCDL_V2F32Type : ROCDL_ConcreteVector<F32, 2>;
+def ROCDL_V3I32Type : ROCDL_ConcreteVector<I32, 3>;
+def ROCDL_V4I32Type : ROCDL_ConcreteVector<I32, 4>;
+def ROCDL_V6I32Type : ROCDL_ConcreteVector<I32, 6>;
+def ROCDL_V8I32Type : ROCDL_ConcreteVector<I32, 8>;
+def ROCDL_V8BF16Type : ROCDL_ConcreteVector<BF16, 8>;
+def ROCDL_V8F16Type : ROCDL_ConcreteVector<F16, 8>;
+def ROCDL_V8F32Type : ROCDL_ConcreteVector<F32, 8>;
+def ROCDL_V16BF16Type : ROCDL_ConcreteVector<BF16, 16>;
+def ROCDL_V16F16Type : ROCDL_ConcreteVector<F16, 16>;
+def ROCDL_V16F32Type : ROCDL_ConcreteVector<F32, 16>;
+def ROCDL_V32F16Type : ROCDL_ConcreteVector<F16, 32>;
+def ROCDL_V32BF16Type : ROCDL_ConcreteVector<BF16, 32>;
+def ROCDL_V32F32Type : ROCDL_ConcreteVector<F32, 32>;
+
+//===----------------------------------------------------------------------===//
 // Wave-level primitives
 //===----------------------------------------------------------------------===//
 
@@ -664,6 +693,68 @@ def ROCDL_GlobalLoadLDSOp :
 }
 
 //===---------------------------------------------------------------------===//
+// Tensor load/store intrinsics (available in GFX1250)
+//===---------------------------------------------------------------------===//
+
+// Base class for tensor load/store operations with 4 descriptor groups.
+class ROCDL_TensorLDSIntrOp<string mnemonic> :
+  ROCDL_IntrOp<mnemonic, [], [], [], 0, 0, 1, 0, [4], ["cachePolicy"]> {
+  dag args = (ins ROCDL_V4I32Type:$dgroup0, ROCDL_V8I32Type:$dgroup1,
+                  ROCDL_V4I32Type:$dgroup2, ROCDL_V4I32Type:$dgroup3,
+                  I32Attr:$cachePolicy);
+  let arguments = !con(args, baseArgs);
+  let summary = "Base class for ROCDL tensor load/store to/from LDS.";
+  let description = [{
+    Moves tiles of tensor data between global memory and LDS. The tile is
+    described by the $dgroup descriptors. Four $dgroup descriptors allow for
+    movement of up to 5D tensors. $cachePolicy encodes the memory scope and an
+    indicator of expected data re-use.
+
+    This op is for gfx1250+ architectures.
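+
+    Example (illustrative only; the SSA names are placeholders, shown for the
+    load variant defined below, following this op's assembly format):
+
+    ```mlir
+    rocdl.tensor.load.to.lds %dgroup0, %dgroup1, %dgroup2, %dgroup3
+        cachepolicy 0 : vector<4xi32>, vector<8xi32>
+    ```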
+  }];
+  let assemblyFormat = [{
+    attr-dict operands `cachepolicy` $cachePolicy `:` type($dgroup0) `,` type($dgroup1)
+  }];
+  let extraClassDefinition = [{
+    SmallVector<Value> $cppClass::getAccessedOperands() {
+      return {getDgroup0(), getDgroup1(), getDgroup2(), getDgroup3()};
+    }
+  }];
+}
+
+// Base class for tensor load/store operations with 2 descriptor groups
+// (D2 variant).
+class ROCDL_TensorLDSIntrD2Op<string mnemonic> :
+  ROCDL_IntrOp<mnemonic, [], [], [], 0, 0, 1, 0, [2], ["cachePolicy"]> {
+  dag args = (ins ROCDL_V4I32Type:$dgroup0, ROCDL_V8I32Type:$dgroup1,
+                  I32Attr:$cachePolicy);
+  let arguments = !con(args, baseArgs);
+  let summary = "Base class for ROCDL tensor load/store to/from LDS (D2 variant).";
+  let description = [{
+    Moves tiles of tensor data between global memory and LDS. The tile is
+    described by the $dgroup descriptors. Two $dgroup descriptors allow for
+    movement of up to 2D tensors. $cachePolicy encodes the memory scope and an
+    indicator of expected data re-use.
+
+    This op is for gfx1250+ architectures.
+  }];
+  let assemblyFormat = [{
+    attr-dict operands `cachepolicy` $cachePolicy `:` type($dgroup0) `,` type($dgroup1)
+  }];
+  let extraClassDefinition = [{
+    SmallVector<Value> $cppClass::getAccessedOperands() {
+      return {getDgroup0(), getDgroup1()};
+    }
+  }];
+}
+
+// Tensor load and store operations
+def ROCDL_TensorLoadToLDSOp : ROCDL_TensorLDSIntrOp<"tensor.load.to.lds">;
+def ROCDL_TensorStoreFromLDSOp : ROCDL_TensorLDSIntrOp<"tensor.store.from.lds">;
+def ROCDL_TensorLoadToLDSD2Op : ROCDL_TensorLDSIntrD2Op<"tensor.load.to.lds.d2">;
+def ROCDL_TensorStoreFromLDSD2Op : ROCDL_TensorLDSIntrD2Op<"tensor.store.from.lds.d2">;
+
+//===---------------------------------------------------------------------===//
 // Operations on raw buffer resources (stride of 0, bounds checks either off or in
 // raw buffer mode).
//===---------------------------------------------------------------------===//
@@ -932,30 +1023,6 @@ def ROCDL_Permlane32SwapOp : ROCDL_IntrOp<"permlane32.swap", [], [],
   }];
 }
 
-class ROCDL_ConcreteVector<Type elem, int length> :
-  FixedVectorOfLengthAndType<[length], [elem]>,
-  BuildableType<
-    "::mlir::VectorType::get({" # length # "} ,"
-    # elem.builderCall # ")">;
-
-def ROCDL_V2I16Type : ROCDL_ConcreteVector<I16, 2>;
-def ROCDL_V2F16Type : ROCDL_ConcreteVector<F16, 2>;
-def ROCDL_V2I32Type : ROCDL_ConcreteVector<I32, 2>;
-def ROCDL_V2BF16Type : ROCDL_ConcreteVector<BF16, 2>;
-def ROCDL_V2F32Type : ROCDL_ConcreteVector<F32, 2>;
-def ROCDL_V3I32Type : ROCDL_ConcreteVector<I32, 3>;
-def ROCDL_V6I32Type : ROCDL_ConcreteVector<I32, 6>;
-def ROCDL_V8I32Type : ROCDL_ConcreteVector<I32, 8>;
-def ROCDL_V8BF16Type : ROCDL_ConcreteVector<BF16, 8>;
-def ROCDL_V8F16Type : ROCDL_ConcreteVector<F16, 8>;
-def ROCDL_V8F32Type : ROCDL_ConcreteVector<F32, 8>;
-def ROCDL_V16BF16Type : ROCDL_ConcreteVector<BF16, 16>;
-def ROCDL_V16F16Type : ROCDL_ConcreteVector<F16, 16>;
-def ROCDL_V16F32Type : ROCDL_ConcreteVector<F32, 16>;
-def ROCDL_V32F16Type : ROCDL_ConcreteVector<F16, 32>;
-def ROCDL_V32BF16Type : ROCDL_ConcreteVector<BF16, 32>;
-def ROCDL_V32F32Type : ROCDL_ConcreteVector<F32, 32>;
-
 //===---------------------------------------------------------------------===//
 // 16-bit float intrinsics
 //===---------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td b/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td
index 2f87975..a18c18a 100644
--- a/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td
+++ b/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td
@@ -2117,6 +2117,56 @@ def OpenACC_KernelsOp : OpenACC_Op<"kernels",
 }
 
 //===----------------------------------------------------------------------===//
+// acc.kernel_environment
+//===----------------------------------------------------------------------===//
+
+def OpenACC_KernelEnvironmentOp : OpenACC_Op<"kernel_environment",
+    [AttrSizedOperandSegments, RecursiveMemoryEffects, SingleBlock,
+     NoTerminator,
+     MemoryEffects<[MemWrite<OpenACC_ConstructResource>,
+                    MemRead<OpenACC_CurrentDeviceIdResource>]>]> {
+  let summary = "Decomposition of compute constructs to capture data mapping "
+                "and asynchronous behavior information";
+  let description = [{
+    The `acc.kernel_environment` operation represents a decomposition of
+    any OpenACC compute construct (acc.kernels, acc.parallel, or
+    acc.serial) that captures data mapping and asynchronous behavior:
+    - data clause operands
+    - async clause operands
+    - wait clause operands
+
+    This allows kernel execution parallelism and privatization to be handled
+    separately from the data mapping captured here, facilitating eventual
+    lowering to the GPU dialect, where kernel launching and compute offloading
+    are distinct steps.
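+
+    Example (a sketch following this op's assembly format; the data clause
+    operand, its type, and the region contents are placeholders):
+
+    ```mlir
+    %create = acc.create varPtr(%arg0 : memref<1024xf32>) -> memref<1024xf32>
+    acc.kernel_environment dataOperands(%create : memref<1024xf32>) {
+      // Kernel launch (e.g. gpu.launch) consuming the mapped data goes here.
+    }
+    ```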
+ }]; + + let arguments = (ins + Variadic<AnyType>:$dataClauseOperands, + Variadic<IntOrIndex>:$asyncOperands, + OptionalAttr<DeviceTypeArrayAttr>:$asyncOperandsDeviceType, + OptionalAttr<DeviceTypeArrayAttr>:$asyncOnly, + Variadic<IntOrIndex>:$waitOperands, + OptionalAttr<DenseI32ArrayAttr>:$waitOperandsSegments, + OptionalAttr<DeviceTypeArrayAttr>:$waitOperandsDeviceType, + OptionalAttr<BoolArrayAttr>:$hasWaitDevnum, + OptionalAttr<DeviceTypeArrayAttr>:$waitOnly); + + let regions = (region SizedRegion<1>:$region); + + let assemblyFormat = [{ + oilist( + `dataOperands` `(` $dataClauseOperands `:` type($dataClauseOperands) `)` + | `async` `` custom<DeviceTypeOperandsWithKeywordOnly>($asyncOperands, + type($asyncOperands), $asyncOperandsDeviceType, $asyncOnly) + | `wait` `` custom<WaitClause>($waitOperands, type($waitOperands), + $waitOperandsDeviceType, $waitOperandsSegments, $hasWaitDevnum, + $waitOnly) + ) + $region attr-dict + }]; +} + +//===----------------------------------------------------------------------===// // 2.6.5 data Construct //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp index eb2d825..bd25e94 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp @@ -495,13 +495,14 @@ FailureOr<PackResult> linalg::pack(RewriterBase &rewriter, if (failed(maybePackedDimForEachOperand)) return failure(); packedOperandsDims.packedDimForEachOperand = *maybePackedDimForEachOperand; - listOfPackedOperandsDim.pushBack(std::move(packedOperandsDims)); LDBG() << "++++ After pack size #" << i << ": " << packedSizes[i]; LDBG() << "maps: " << llvm::interleaved(indexingMaps); LDBG() << "iterators: " << llvm::interleaved(iteratorTypes); LDBG() << "packedDimForEachOperand: " << llvm::interleaved(packedOperandsDims.packedDimForEachOperand); + + listOfPackedOperandsDim.pushBack(std::move(packedOperandsDims)); } // Step 2. Propagate packing to all LinalgOp operands. diff --git a/mlir/lib/Dialect/MemRef/IR/ValueBoundsOpInterfaceImpl.cpp b/mlir/lib/Dialect/MemRef/IR/ValueBoundsOpInterfaceImpl.cpp index 3aa801b..69afbca 100644 --- a/mlir/lib/Dialect/MemRef/IR/ValueBoundsOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/MemRef/IR/ValueBoundsOpInterfaceImpl.cpp @@ -107,7 +107,7 @@ struct CollapseShapeOpInterface assert(value == collapseOp.getResult() && "invalid value"); // Multiply the expressions for the dimensions in the reassociation group. 
- const ReassociationIndices &reassocIndices = + const ReassociationIndices reassocIndices = collapseOp.getReassociationIndices()[dim]; AffineExpr productExpr = cstr.getExpr(collapseOp.getSrc(), reassocIndices[0]); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp index f53d272..ffa8b40 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp @@ -152,19 +152,20 @@ IterationGraphSorter IterationGraphSorter::fromGenericOp( } IterationGraphSorter::IterationGraphSorter( - SmallVector<Value> &&ins, SmallVector<AffineMap> &&loop2InsLvl, Value out, - AffineMap loop2OutLvl, SmallVector<utils::IteratorType> &&iterTypes, + SmallVector<Value> &&insArg, SmallVector<AffineMap> &&loop2InsLvlArg, + Value out, AffineMap loop2OutLvl, + SmallVector<utils::IteratorType> &&iterTypesArg, sparse_tensor::LoopOrderingStrategy strategy) - : ins(std::move(ins)), loop2InsLvl(std::move(loop2InsLvl)), out(out), - loop2OutLvl(loop2OutLvl), iterTypes(std::move(iterTypes)), + : ins(std::move(insArg)), loop2InsLvl(std::move(loop2InsLvlArg)), out(out), + loop2OutLvl(loop2OutLvl), iterTypes(std::move(iterTypesArg)), strategy(strategy) { // One map per tensor. - assert(this->loop2InsLvl.size() == this->ins.size()); + assert(loop2InsLvl.size() == ins.size()); // All the affine maps have the same number of dimensions (loops). assert(llvm::all_equal(llvm::map_range( - this->loop2InsLvl, [](AffineMap m) { return m.getNumDims(); }))); + loop2InsLvl, [](AffineMap m) { return m.getNumDims(); }))); // The number of results of the map should match the rank of the tensor. - assert(llvm::all_of(llvm::zip(this->loop2InsLvl, this->ins), [](auto mvPair) { + assert(llvm::all_of(llvm::zip(loop2InsLvl, ins), [](auto mvPair) { auto [m, v] = mvPair; // For ranked types the rank must match. diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h index b2a16e9..35e58ed 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h +++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h @@ -59,10 +59,10 @@ public: private: // Private constructor. 
-  IterationGraphSorter(SmallVector<Value> &&ins,
-                       SmallVector<AffineMap> &&loop2InsLvl, Value out,
+  IterationGraphSorter(SmallVector<Value> &&insArg,
+                       SmallVector<AffineMap> &&loop2InsLvlArg, Value out,
                        AffineMap loop2OutLvl,
-                       SmallVector<utils::IteratorType> &&iterTypes,
+                       SmallVector<utils::IteratorType> &&iterTypesArg,
                        sparse_tensor::LoopOrderingStrategy strategy =
                            sparse_tensor::LoopOrderingStrategy::kDefault);
diff --git a/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp
index 1e3b377..549ac7a 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp
@@ -77,7 +77,7 @@ FailureOr<TilingResult> tensor::replaceInsertSlicesWithTiledConsumer(
       dyn_cast<TilingInterface>(consumerOperands.front()->getOwner());
   if (!consumerOp)
     return failure();
-  for (auto opOperand : consumerOperands.drop_front()) {
+  for (auto *opOperand : consumerOperands.drop_front()) {
     if (opOperand->getOwner() != consumerOp) {
       LLVM_DEBUG({
         llvm::dbgs()
diff --git a/mlir/test/Dialect/LLVMIR/rocdl.mlir b/mlir/test/Dialect/LLVMIR/rocdl.mlir
index d270ee8..e703600 100644
--- a/mlir/test/Dialect/LLVMIR/rocdl.mlir
+++ b/mlir/test/Dialect/LLVMIR/rocdl.mlir
@@ -664,6 +664,36 @@ llvm.func @rocdl.global.load.lds(%src : !llvm.ptr<1>, %dst: !llvm.ptr<3>) {
   llvm.return
 }
 
+// CHECK-LABEL: @rocdl.tensor.load.to.lds
+llvm.func @rocdl.tensor.load.to.lds(%dgroup0 : vector<4xi32>, %dgroup1 : vector<8xi32>,
+                                    %dgroup2 : vector<4xi32>, %dgroup3 : vector<4xi32>) {
+  // CHECK: rocdl.tensor.load.to.lds %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} cachepolicy 0 : vector<4xi32>, vector<8xi32>
+  rocdl.tensor.load.to.lds %dgroup0, %dgroup1, %dgroup2, %dgroup3 cachepolicy 0 : vector<4xi32>, vector<8xi32>
+  llvm.return
+}
+
+// CHECK-LABEL: @rocdl.tensor.store.from.lds
+llvm.func @rocdl.tensor.store.from.lds(%dgroup0 : vector<4xi32>, %dgroup1 : vector<8xi32>,
+                                       %dgroup2 : vector<4xi32>, %dgroup3 : vector<4xi32>) {
+  // CHECK: rocdl.tensor.store.from.lds %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} cachepolicy 0 : vector<4xi32>, vector<8xi32>
+  rocdl.tensor.store.from.lds %dgroup0, %dgroup1, %dgroup2, %dgroup3 cachepolicy 0 : vector<4xi32>, vector<8xi32>
+  llvm.return
+}
+
+// CHECK-LABEL: @rocdl.tensor.load.to.lds.d2
+llvm.func @rocdl.tensor.load.to.lds.d2(%dgroup0 : vector<4xi32>, %dgroup1 : vector<8xi32>) {
+  // CHECK: rocdl.tensor.load.to.lds.d2 %{{.*}}, %{{.*}} cachepolicy 0 : vector<4xi32>, vector<8xi32>
+  rocdl.tensor.load.to.lds.d2 %dgroup0, %dgroup1 cachepolicy 0 : vector<4xi32>, vector<8xi32>
+  llvm.return
+}
+
+// CHECK-LABEL: @rocdl.tensor.store.from.lds.d2
+llvm.func @rocdl.tensor.store.from.lds.d2(%dgroup0 : vector<4xi32>, %dgroup1 : vector<8xi32>) {
+  // CHECK: rocdl.tensor.store.from.lds.d2 %{{.*}}, %{{.*}} cachepolicy 0 : vector<4xi32>, vector<8xi32>
+  rocdl.tensor.store.from.lds.d2 %dgroup0, %dgroup1 cachepolicy 0 : vector<4xi32>, vector<8xi32>
+  llvm.return
+}
+
 llvm.func @rocdl.make.buffer.rsrc(%ptr : !llvm.ptr,
                                   %stride : i16,
                                   %numRecords : i64,
diff --git a/mlir/test/Dialect/OpenACC/ops.mlir b/mlir/test/Dialect/OpenACC/ops.mlir
index 77d18da..042ee25 100644
--- a/mlir/test/Dialect/OpenACC/ops.mlir
+++ b/mlir/test/Dialect/OpenACC/ops.mlir
@@ -2243,3 +2243,76 @@ func.func @test_firstprivate_map(%arg0: memref<10xf32>) {
 // CHECK-NEXT:   acc.yield
 // CHECK-NEXT: }
 // CHECK-NEXT: return
+
+// -----
+
+func.func @test_kernel_environment(%arg0:
memref<1024xf32>, %arg1: memref<1024xf32>) { + %c1 = arith.constant 1 : index + %c1024 = arith.constant 1024 : index + + // Create data clause operands for the kernel environment + %copyin = acc.copyin varPtr(%arg0 : memref<1024xf32>) -> memref<1024xf32> + %create = acc.create varPtr(%arg1 : memref<1024xf32>) -> memref<1024xf32> + + // Kernel environment wraps gpu.launch and captures data mapping + acc.kernel_environment dataOperands(%copyin, %create : memref<1024xf32>, memref<1024xf32>) { + gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1, %grid_z = %c1) + threads(%tx, %ty, %tz) in (%block_x = %c1024, %block_y = %c1, %block_z = %c1) { + // Kernel body uses the mapped data + %val = memref.load %copyin[%tx] : memref<1024xf32> + %result = arith.mulf %val, %val : f32 + memref.store %result, %create[%tx] : memref<1024xf32> + gpu.terminator + } + } + + // Copy results back to host and deallocate device memory + acc.copyout accPtr(%create : memref<1024xf32>) to varPtr(%arg1 : memref<1024xf32>) + acc.delete accPtr(%copyin : memref<1024xf32>) + + return +} + +// CHECK-LABEL: func @test_kernel_environment +// CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%{{.*}} : memref<1024xf32>) -> memref<1024xf32> +// CHECK: %[[CREATE:.*]] = acc.create varPtr(%{{.*}} : memref<1024xf32>) -> memref<1024xf32> +// CHECK: acc.kernel_environment dataOperands(%[[COPYIN]], %[[CREATE]] : memref<1024xf32>, memref<1024xf32>) { +// CHECK: gpu.launch +// CHECK: memref.load %[[COPYIN]] +// CHECK: memref.store %{{.*}}, %[[CREATE]] +// CHECK: } +// CHECK: } +// CHECK: acc.copyout accPtr(%[[CREATE]] : memref<1024xf32>) to varPtr(%{{.*}} : memref<1024xf32>) +// CHECK: acc.delete accPtr(%[[COPYIN]] : memref<1024xf32>) + +// ----- + +func.func @test_kernel_environment_with_async(%arg0: memref<1024xf32>) { + %c1 = arith.constant 1 : index + %c1024 = arith.constant 1024 : index + %async_val = arith.constant 1 : i32 + + %create = acc.create varPtr(%arg0 : memref<1024xf32>) async(%async_val : i32) -> memref<1024xf32> + + // Kernel environment with async clause + acc.kernel_environment dataOperands(%create : memref<1024xf32>) async(%async_val : i32) { + gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1, %grid_z = %c1) + threads(%tx, %ty, %tz) in (%block_x = %c1024, %block_y = %c1, %block_z = %c1) { + %f0 = arith.constant 0.0 : f32 + memref.store %f0, %create[%tx] : memref<1024xf32> + gpu.terminator + } + } + + acc.copyout accPtr(%create : memref<1024xf32>) async(%async_val : i32) to varPtr(%arg0 : memref<1024xf32>) + + return +} + +// CHECK-LABEL: func @test_kernel_environment_with_async +// CHECK: %[[ASYNC:.*]] = arith.constant 1 : i32 +// CHECK: %[[CREATE:.*]] = acc.create varPtr(%{{.*}} : memref<1024xf32>) async(%[[ASYNC]] : i32) -> memref<1024xf32> +// CHECK: acc.kernel_environment dataOperands(%[[CREATE]] : memref<1024xf32>) async(%[[ASYNC]] : i32) +// CHECK: gpu.launch +// CHECK: memref.store %{{.*}}, %[[CREATE]] +// CHECK: acc.copyout accPtr(%[[CREATE]] : memref<1024xf32>) async(%[[ASYNC]] : i32) to varPtr(%{{.*}} : memref<1024xf32>) diff --git a/mlir/test/Target/LLVMIR/rocdl.mlir b/mlir/test/Target/LLVMIR/rocdl.mlir index 30126f6..8a848221 100644 --- a/mlir/test/Target/LLVMIR/rocdl.mlir +++ b/mlir/test/Target/LLVMIR/rocdl.mlir @@ -1040,6 +1040,36 @@ llvm.func @rocdl.global.load.lds(%src : !llvm.ptr<1>, %dst: !llvm.ptr<3>) { llvm.return } +// CHECK-LABEL: rocdl.tensor.load.to.lds +llvm.func @rocdl.tensor.load.to.lds(%dgroup0 : vector<4xi32>, %dgroup1 : vector<8xi32>, + %dgroup2 : vector<4xi32>, 
%dgroup3 : vector<4xi32>) { + // CHECK: call void @llvm.amdgcn.tensor.load.to.lds(<4 x i32> %{{.*}}, <8 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + rocdl.tensor.load.to.lds %dgroup0, %dgroup1, %dgroup2, %dgroup3 cachepolicy 0 : vector<4xi32>, vector<8xi32> + llvm.return +} + +// CHECK-LABEL: rocdl.tensor.store.from.lds +llvm.func @rocdl.tensor.store.from.lds(%dgroup0 : vector<4xi32>, %dgroup1 : vector<8xi32>, + %dgroup2 : vector<4xi32>, %dgroup3 : vector<4xi32>) { + // CHECK: call void @llvm.amdgcn.tensor.store.from.lds(<4 x i32> %{{.*}}, <8 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + rocdl.tensor.store.from.lds %dgroup0, %dgroup1, %dgroup2, %dgroup3 cachepolicy 0 : vector<4xi32>, vector<8xi32> + llvm.return +} + +// CHECK-LABEL: rocdl.tensor.load.to.lds.d2 +llvm.func @rocdl.tensor.load.to.lds.d2(%dgroup0 : vector<4xi32>, %dgroup1 : vector<8xi32>) { + // CHECK: call void @llvm.amdgcn.tensor.load.to.lds.d2(<4 x i32> %{{.*}}, <8 x i32> %{{.*}}, i32 0) + rocdl.tensor.load.to.lds.d2 %dgroup0, %dgroup1 cachepolicy 0 : vector<4xi32>, vector<8xi32> + llvm.return +} + +// CHECK-LABEL: rocdl.tensor.store.from.lds.d2 +llvm.func @rocdl.tensor.store.from.lds.d2(%dgroup0 : vector<4xi32>, %dgroup1 : vector<8xi32>) { + // CHECK: call void @llvm.amdgcn.tensor.store.from.lds.d2(<4 x i32> %{{.*}}, <8 x i32> %{{.*}}, i32 0) + rocdl.tensor.store.from.lds.d2 %dgroup0, %dgroup1 cachepolicy 0 : vector<4xi32>, vector<8xi32> + llvm.return +} + llvm.func @rocdl.make.buffer.rsrc(%ptr : !llvm.ptr, %stride : i16, %numRecords : i64, diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel index d528dae..e8561cc 100644 --- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel @@ -12086,6 +12086,7 @@ cc_library( srcs = glob(["lib/Dialect/Transform/TuneExtension/*.cpp"]), hdrs = glob(["include/mlir/Dialect/Transform/TuneExtension/*.h"]), deps = [ + ":ControlFlowInterfaces", ":IR", ":TransformDialect", ":TransformDialectInterfaces", |
